[
  {
    "path": ".buildpacks",
    "content": "https://github.com/mcollina/heroku-buildpack-graphicsmagick\nhttps://github.com/kr/heroku-buildpack-go.git\n"
  },
  {
    "path": ".gitignore",
    "content": "# Compiled Object files, Static and Dynamic libs (Shared Objects)\n*.o\n*.a\n*.so\n\n# Folders\n_obj\n_test\n\n# Architecture specific extensions/prefixes\n*.[568vq]\n[568vq].out\n\n*.cgo1.go\n*.cgo2.c\n_cgo_defun.c\n_cgo_gotypes.go\n_cgo_export.*\n\n_testmain.go\n\n*.exe\n*.test\n*.prof\n\nImgurGo\ntags\n\n*.sw[o-p]\n\nprofile.cov\n"
  },
  {
    "path": ".travis.yml",
    "content": "sudo: required\n\nservices:\n  - docker\n\nbefore_install:\n  - docker build -t imgur/mandible .\n\nscript:\n  - docker run -e \"COVERALLS_TOKEN=$COVERALLS_TOKEN\" -e \"TRAVIS_JOB_ID=$TRAVIS_JOB_ID\" imgur/mandible /bin/sh -c \"cd /go/src/github.com/Imgur/mandible && ./goclean.sh\"\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM golang:1.8-stretch\nRUN apt-get update && apt-get install -yqq aspell aspell-en libaspell-dev tesseract-ocr tesseract-ocr-eng libc6 optipng exiftool libjpeg-progs webp\nADD docker/build_gm.sh /tmp/build_gm.sh\nRUN bash /tmp/build_gm.sh\nADD docker/meme.traineddata /usr/share/tesseract-ocr/tessdata/meme.traineddata\nRUN mkdir -p /etc/mandible /tmp/imagestore\nENV MANDIBLE_CONF /etc/mandible/conf.json\nADD . /go/src/github.com/Imgur/mandible\nWORKDIR /go/src/github.com/Imgur/mandible\nRUN go get github.com/mattn/goveralls\nRUN go get github.com/tools/godep\nRUN godep restore\nRUN godep go install -v .\nCMD [\"mandible\"]\n"
  },
  {
    "path": "Godeps/Godeps.json",
    "content": "{\n\t\"ImportPath\": \"github.com/Imgur/mandible\",\n\t\"GoVersion\": \"go1.5\",\n\t\"Packages\": [\n\t\t\"./...\"\n\t],\n\t\"Deps\": [\n\t\t{\n\t\t\t\"ImportPath\": \"github.com/bradfitz/http2\",\n\t\t\t\"Rev\": \"f8202bc903bda493ebba4aa54922d78430c2c42f\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"github.com/golang/glog\",\n\t\t\t\"Rev\": \"44145f04b68cf362d9c4df2182967c2275eaefed\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"github.com/golang/protobuf/proto\",\n\t\t\t\"Rev\": \"34a5f244f1c01cdfee8e60324258cfbb97a42aec\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"github.com/gorilla/context\",\n\t\t\t\"Rev\": \"215affda49addc4c8ef7e2534915df2c8c35c6cd\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"github.com/gorilla/mux\",\n\t\t\t\"Rev\": \"47e8f450ef38c857cdd922ec08862ca9d65a1c6d\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"github.com/mitchellh/goamz/aws\",\n\t\t\t\"Rev\": \"2441a8d0fab90553ec345cfdf3db24bb61ea61c3\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"github.com/mitchellh/goamz/s3\",\n\t\t\t\"Rev\": \"2441a8d0fab90553ec345cfdf3db24bb61ea61c3\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"github.com/trustmaster/go-aspell\",\n\t\t\t\"Rev\": \"b1cc0c2c49f83195f1708a1e6d23967d94817296\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"github.com/vaughan0/go-ini\",\n\t\t\t\"Rev\": \"a98ad7ee00ec53921f08832bc06ecf7fd600e6a1\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"golang.org/x/crypto/ssh/terminal\",\n\t\t\t\"Rev\": \"3760e016850398b85094c4c99e955b8c3dea5711\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"golang.org/x/net/context\",\n\t\t\t\"Rev\": \"84afb0af0050ae286aa9ced0c29383c2a866a925\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"golang.org/x/oauth2\",\n\t\t\t\"Rev\": \"b5adcc2dcdf009d0391547edc6ecbaff889f5bb9\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"google.golang.org/api/bigquery/v2\",\n\t\t\t\"Rev\": \"0610a35668fd6881bec389e74208f0df92010e96\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"google.golang.org/api/container/v1beta1\",\n\t\t\t\"Rev\": 
\"0610a35668fd6881bec389e74208f0df92010e96\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"google.golang.org/api/googleapi\",\n\t\t\t\"Rev\": \"0610a35668fd6881bec389e74208f0df92010e96\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"google.golang.org/api/pubsub/v1beta2\",\n\t\t\t\"Rev\": \"0610a35668fd6881bec389e74208f0df92010e96\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"google.golang.org/api/storage/v1\",\n\t\t\t\"Rev\": \"0610a35668fd6881bec389e74208f0df92010e96\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"google.golang.org/appengine\",\n\t\t\t\"Rev\": \"6bde959377a90acb53366051d7d587bfd7171354\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"google.golang.org/cloud\",\n\t\t\t\"Rev\": \"0b21ed5434dc279f2b8ea3c02dc69135600bbb8b\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"google.golang.org/grpc\",\n\t\t\t\"Rev\": \"d6f8134fd2e79a0a2a40f284d5552065fb6a8e3c\"\n\t\t}\n\t]\n}\n"
  },
  {
    "path": "Godeps/Readme",
    "content": "This directory tree is generated automatically by godep.\n\nPlease do not edit.\n\nSee https://github.com/tools/godep for more information.\n"
  },
  {
    "path": "LICENSE",
    "content": "The MIT License (MIT)\n\nCopyright (c) 2015 Imgur, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "Procfile",
    "content": "web: ImgurGo\n"
  },
  {
    "path": "README.md",
    "content": "# mandible ![TravisStatus](https://travis-ci.org/Imgur/mandible.svg) [![Coverage Status](https://coveralls.io/repos/Imgur/mandible/badge.svg)](https://coveralls.io/r/Imgur/mandible)\n\nA ready-to-deploy uploader that you can run on AWS EC2 or Heroku. It accepts an image via a REST interface and returns information about a file. Also, supports processing steps such as compression and thumbnail generation.\n\n\n[![Deploy](https://www.herokucdn.com/deploy/button.png)](https://heroku.com/deploy)\n\n\n## Features:\nSupported file types\n- JPG\n- PNG\n- GIF\n\nPluggable storage layers\n- S3\n- Local\n\nPluggable authentication scheme\n- Time-grant HMAC\n\nProcessing Steps:\n- Compression\n- Thumbnail generation\n\n## Installation\n\n### Docker\n\nPull down the mandible config file and edit it:\n\n```\nwget https://raw.githubusercontent.com/Imgur/mandible/master/config/default.conf.json -O ~/mandible/conf.json\n```\n```\nvim ~/mandible/conf.json\n```\n\nTo start mandible (port settings could change based on your conf.json):\n\n```\ndocker run --name mandible -v ~/mandible:/etc/mandible -d -p 80:8080 imgur/mandible\n```\n\nTo stop mandible:\n\n```\ndocker stop mandible\n```\n\nTo run it again:\n\n```\ndocker run mandible\n```\n### (Optional) Authentication\n\n- Set the following environment variable\n    - AUTHENTICATION_HMAC_KEY\n\n### S3 Storage Layer\nAdd the following to the `Stores` array in your conf.json file:\n\n```\n    {\n        \"Type\" : \"s3\",\n        \"BucketName\" : \"\",\n        \"AWSKey\": \"\",\n        \"AWSSecret\": \"\",\n        \"StoreRoot\" : \"\",\n        \"Region\" : \"us-east-1\",\n        \"NamePathRegex\" : \"\",\n        \"NamePathMap\" : \"${ImageSize}/${ImageName}\"\n    }\n```\n\n\n## REST API:\n\nInterfacing with mandible is extremely simple:\n\n### Upload an image file:\n`POST /file`\n\nwith the following multi-part/form-data\n- ```image``` - file\n\n---\n### Upload an image from a URL:\n`POST /url`\n\nwith the 
following multi-part/form-data\n- ```image``` - string\n\n---\n### Upload an image from base64 data:\n`POST /base64`\n\nwith the following multi-part/form-data\n- ```image``` - image encoded as base64 data\n\n---\n### Thumbnail generation during upload:\n\n**To generate thumbnails with an upload request, pass the following JSON as form-data, keyed under `thumbs`**\n\n```javascript\n{\n    \"name1\": {\n        \"width\": x,\n        \"height\": y,\n        \"shape\": (\"square\" | \"thumb\" | \"circle\")\n    },\n    \"name2\": {\n        \"width\": x2,\n        \"height\": y2,\n        \"shape\": (\"square\" | \"thumb\" | \"circle\")\n    },\n\n    ...\n}\n```\n\nNote: Square thumbnails don't preserve aspect ratio, whereas the 'thumb' type does\n\n---\n### On the fly thumbnail generation:\n**this will return `content-type: image/...` and serve up a thumbnail.**\n\n`GET /thumbnail`\n\nwith the following get parameters:\n- ```uid``` - Unique ID of the image\n- ```thumbs``` - JSON of the following format:\n```javascript\n{\n    \"name for the thumbnail\": {\n        \"shape\": (\"square\" | \"thumb\" | \"circle\" | \"custom\") // required\n        \"width\": int,\n        \"height\": int,\n        \"max_width\": int,\n        \"max_height\": int,\n        \"crop_gravity\": string, // e.g. \"nw\" for north west of the image\n        \"crop_height\": int,\n        \"crop_width\": int,\n        \"quaity\": int,\n        \"crop_ratio\": string, // e.g. 
\"2:1\"\n        \"format\": string, // one of: jpg, png, gif, webm,\n        \"nostore\": bool, // if true, the resulting thumbnail won't be added to the backing storage\n    }\n}\n```\n\n---\n### OCR endpoint\n**Runs OCR on the given image and returns text**\n\n`GET /ocr`\n\nwith the following get parameters:\n- ```uid``` - Unique ID of the image\n\nreturns:\n```Javascript\n{\n    \"hash\": string, //uid of the image\n    \"ocrtext\": string // text returned from OCR\n}\n```\n\n## Example usage (assuming localhost)\n\n### URL Upload with thumbnails:\n\n```\ncurl -i http://127.0.0.1:8080/url \\\n-d 'image=http://i.imgur.com/s9zxmYe.jpg' \\\n-d 'thumbs={\"small\": {\"width\": 20, \"height\": 20, \"shape\": \"square\"}, \"profile\": {\"width\": 50, \"height\": 50, \"shape\": \"circle\"}}'\n```\n### Response:\n\n```javascript\n{\n    \"data\": {\n        \"width\": 380,\n        \"height\": 430,\n        \"link\": \"https://s3.amazonaws.com/gophergala/original/CUqU4If\",\n        \"mime\": \"image/jpeg\",\n        \"name\": \"\",\n        \"size\": 82199,\n        \"thumbs\": {\n            \"profile\":\"https://s3.amazonaws.com/gophergala/t/CUqU4If/profile\",\n            \"small\": \"https://s3.amazonaws.com/gophergala/t/CUqU4If/small\"\n        }\n    },\n    \"status\": 200,\n    \"success\": true\n}\n```\n### File Upload with thumbnails:\n\n```\ncurl -i http://127.0.0.1:8080/file \\\n-F 'image=@/tmp/cat.gif' \\\n-F 'thumbs={\"small\": {\"width\": 20, \"height\": 20, \"shape\": \"square\"}}'\n```\n### Response:\n\n```javascript\n{\n    \"data\": {\n        \"width\": 354,\n        \"height\": 200,\n        \"link\": \"https://s3.amazonaws.com/gophergala/original/L4ASjMX\",\n        \"mime\": \"image/gif\",\n        \"name\": \"cat.gif\",\n        \"size\": 3511100,\n        \"thumbs\": {\n            \"small\":\"https://s3.amazonaws.com/gophergala/t/L4ASjMX/small\"\n        }\n    },\n    \"status\": 200,\n    \"success\": true\n}\n```\n\n### Authenticated 
upload\n\nUses HTTP headers `Authentication` and `X-Authentication-HMAC`. Generate HMACs by base64-encoding a JSON blob like below. [Example MAC generator](http://play.golang.org/p/3otGr8LBZt).\nSupplying the client with the Authentication blob and MAC is out of scope for this project. In the future we will support symmetric and asymmetric encryption of the authentication blobs.\n\n#### Request to my own account with proper authorization:\n\n```\ncurl -i http://127.0.0.1:8080/user/1/url \\\n-d 'image=http://i.imgur.com/s9zxmYe.jpg' \\\n-H 'Authorization: {\"user_id\":1,\"grant_time\":\"2010-06-01T00:00:00Z\",\"grant_duration_sec\":31536000}' \\\n-H 'X-Authorization-HMAC: tCtGb04n4nvd/94+Xd6vAx9+pJw51ZmX1vH7E+BlTtc='\n```\n\n#### Response:\n```javascript\n{\"data\":{\"link\":\"/tmp/original/J/a/Jafq9IH\",\"mime\":\"image/jpeg\",\"name\":\"s9zxmYe.jpg\",\"hash\":\"Jafq9IH\",\"size\":81881,\"width\":380,\"height\":430,\"ocrtext\":\"change\\np.roject .\\n\\n  \\n  \\n\\n  forg@ot to git p.ull before\\n- .-+#~+):,-r,ad)q..,i,ng so/ /\",\"thumbs\":{},\"user_id\":\"\\u0001\"},\"status\":200,\"success\":true}\n```\n\n#### Request to other user's account:\n\n```\ncurl -i http://127.0.0.1:8080/user/2/url \\\n-d 'image=http://i.imgur.com/s9zxmYe.jpg' \\\n-H 'Authorization: {\"user_id\":1,\"grant_time\":\"2010-06-01T00:00:00Z\",\"grant_duration_sec\":31536000}' \\\n-H 'X-Authorization-HMAC: tCtGb04n4nvd/94+Xd6vAx9+pJw51ZmX1vH7E+BlTtc='\n```\n\n#### Response:\n```\nHTTP/1.1 401 Unauthorized\nDate: Mon, 08 Jun 2015 21:04:41 GMT\nContent-Length: 0\nContent-Type: text/plain; charset=utf-8\n```\n\n\n#### HMAC prevents account forgery\n\n```\ncurl -i http://127.0.0.1:8080/user/1/url \\\n-d 'image=http://i.imgur.com/s9zxmYe.jpg' \\\n-H 'Authorization: {\"user_id\":1,\"grant_time\":\"2010-06-01T00:00:00Z\",\"grant_duration_sec\":31536000}' \\\n-H 'X-Authorization-HMAC: foobar'\n```\n\n#### Response:\n```javascript\nHTTP/1.1 401 Unauthorized\nDate: Mon, 08 Jun 2015 21:04:41 
GMT\nContent-Length: 0\nContent-Type: text/plain; charset=utf-8\n```\n\n## Contributing\n\nThe easiest way to develop on this project is to use the built-in docker image. We are using the Go 1.5 vendor experiment, which means if\nyou import a package you must vendor the source code into this repository using Godep.\n"
  },
  {
    "path": "app.json",
    "content": "{\n  \"name\": \"ImgurGo\",\n  \"description\": \"An easy Heroku image uploading service\",\n  \"repository\": \"https://github.com/gophergala/ImgurGo\",\n  \"env\": {\n      \"BUILDPACK_URL\": \"https://github.com/ddollar/heroku-buildpack-multi\",\n      \"IMGUR_GO_CONF\": \"config/default.conf.json\",\n      \"S3_BUCKET\": {\n          \"description\": \"AWS S3 Bucket\",\n          \"required\": false\n      },\n      \"AWS_ACCESS_KEY_ID\": {\n          \"description\": \"AWS Access Key ID\",\n          \"required\": false\n      },\n      \"AWS_SECRET_ACCESS_KEY\": {\n          \"description\": \"AWS Access Key Secret\",\n          \"required\": false\n      }\n  }\n}\n"
  },
  {
    "path": "config/config.go",
    "content": "package config\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"os\"\n)\n\ntype Configuration struct {\n\tMaxFileSize     int64\n\tHashLength      int\n\tUserAgent       string\n\tStores          []map[string]string\n\tPort            int\n\tDatadogEnabled  bool\n\tDatadogHostname string\n}\n\nfunc NewConfiguration(path string) *Configuration {\n\tfile, err := os.Open(path)\n\n\tif err != nil {\n\t\tfmt.Printf(\"Error opening config file!\")\n\t\tos.Exit(-1)\n\t}\n\n\tdecoder := json.NewDecoder(file)\n\tconfiguration := &Configuration{}\n\terr = decoder.Decode(configuration)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error loading config file: \", err)\n\t}\n\n\treturn configuration\n}\n"
  },
  {
    "path": "config/default.conf.json",
    "content": "{\n    \"Port\": 8080,\n    \"MaxFileSize\": 20971520,\n    \"HashLength\": 7,\n    \"UserAgent\": \"ImgurGo (https://github.com/gophergala/ImgurGo)\",\n    \"Stores\" : [\n        {\n            \"Type\" : \"s3\",\n            \"BucketName\" : \"\",\n            \"AWSKey\": \"\",\n            \"AWSSecret\": \"\",\n            \"StoreRoot\" : \"\",\n            \"Region\" : \"us-east-1\",\n            \"NamePathRegex\" : \"\",\n            \"NamePathMap\" : \"${ImageSize}/${ImageName}\"\n        },\n        {\n            \"Type\" : \"gcs\",\n            \"BucketName\" : \"\",\n            \"StoreRoot\" : \"\",\n            \"AppID\" : \"\",\n            \"KeyFile\" : \"appid.json\",\n            \"NamePathRegex\" : \"\",\n            \"NamePathMap\" : \"${ImageSize}/${ImageName}\"\n        },\n        {\n            \"Type\" : \"local\",\n            \"StoreRoot\": \"/Users/jarvis/imagestore\",\n            \"NamePathRegex\" : \"^([a-zA-Z0-9])([a-zA-Z0-9]).*\",\n            \"NamePathMap\" : \"${ImageSize}/${1}/${2}/${ImageName}\"\n        }\n    ],\n    \"DatadogEnabled\": false,\n    \"DatadogHostname\": \"127.0.0.1\"\n}\n"
  },
  {
    "path": "docker/build_gm.sh",
    "content": "#!/bin/bash\n\napt-get install -y libjpeg-dev liblcms2-dev libwmf-dev libx11-dev libsm-dev libice-dev libxext-dev x11proto-core-dev libxml2-dev libfreetype6-dev libexif-dev libbz2-dev libtiff-dev libjbig-dev zlib1g-dev libpng-dev libwebp-dev ghostscript gsfonts autotools-dev transfig sharutils libltdl-dev mercurial cmake\nwget \"http://www.ece.uvic.ca/~frodo/jasper/software/jasper-2.0.12.tar.gz\" -O jasper.tar.gz\nmkdir jasper && tar -xvzf jasper.tar.gz -C jasper --strip-components 1 && cd jasper\nmkdir BUILD && cd BUILD && cmake -DCMAKE_INSTALL_PREFIX=/usr    \\\n      -DCMAKE_BUILD_TYPE=Release     \\\n      -DCMAKE_SKIP_INSTALL_RPATH=YES \\\n      -DCMAKE_INSTALL_DOCDIR=/usr/share/doc/jasper-2.0.10 \\\n      ..  &&\nmake\nmake install\ncd ../.. && rm -rf jasper jasper.tar.gz\nhg clone http://hg.code.sf.net/p/graphicsmagick/code GM\ncd GM\nhg update -r tip\nCC=\"gcc\" CFLAGS=\"-fopenmp -Wall -g -fno-strict-aliasing -O3 -Wall -pthread\" CPPFLAGS=\"-I/usr/include/X11 -I/usr/include/freetype2 -I/usr/include/libxml2\" CXX=\"g++\" CXXFLAGS=\"-Wall -g -fno-strict-aliasing -O3 -pthread\" LDFLAGS=\"-L/usr/lib/X11 -L/usr/lib/x86_64-linux-gnu\" LIBS=\"-ljbig -lwebp -llcms2 -ltiff -lfreetype -ljpeg -lpng16 -lwmflite -lXext -lSM -lICE -lX11 -llzma -lbz2 -lxml2 -lz -lm -lgomp -lpthread\" ./configure  '--build' 'x86_64-linux-gnu' '--enable-shared' '--enable-static' '--enable-libtool-verbose' '--prefix=/usr' '--mandir=${prefix}/share/man' '--infodir=${prefix}/share/info' '--docdir=${prefix}/share/doc/graphicsmagick' '--with-gs-font-dir=/usr/share/fonts/type1/gsfonts' '--with-x' '--x-includes=/usr/include/X11' '--x-libraries=/usr/lib/X11' '--with-included-ltdl' '--with-modules' '--enable-openmp-slow' '--without-dps' '--without-frozenpaths' '--with-webp' '--with-perl' '--with-perl-options=INSTALLDIRS=vendor' '--enable-quantum-library-names' '--with-quantum-depth=16' 'build_alias=x86_64-linux-gnu' 'CFLAGS=-Wall -g -fno-strict-aliasing -O3' 'LDFLAGS=' 
'CXXFLAGS=-Wall -g -fno-strict-aliasing -O3'\nmake\nmake install\ncd .. && rm -rf GM\n"
  },
  {
    "path": "docker/conf.json",
    "content": "{\n    \"Port\": 8080,\n    \"MaxFileSize\": 20971520,\n    \"HashLength\": 7,\n    \"UserAgent\": \"Mandible (https://github.com/Imgur/Mandible)\",\n    \"Stores\" : [\n        {\n            \"Type\" : \"local\",\n            \"StoreRoot\": \"/tmp/imagestore\",\n            \"NamePathRegex\" : \"^([a-zA-Z0-9])([a-zA-Z0-9]).*\",\n            \"NamePathMap\" : \"${ImageSize}/${1}/${2}/${ImageName}\"\n        }\n    ]\n}\n"
  },
  {
    "path": "goclean.sh",
    "content": "#!/bin/bash\n# The script does automatic checking on a Go package and its sub-packages, including:\n# 1. gofmt         (http://golang.org/cmd/gofmt/)\n# 2. goimports     (https://github.com/bradfitz/goimports)\n# 3. golint        (https://github.com/golang/lint)\n# 4. go vet        (http://golang.org/cmd/vet)\n# 5. race detector (http://blog.golang.org/race-detector)\n# 6. test coverage (http://blog.golang.org/cover)\n\nexport GO15VENDOREXPERIMENT=1\n\nset -e\n\nPROJECTS=\"./uploadedfile ./server ./imageprocessor ./imagestore ./config .\"\n\n# Automatic checks\ntest -z \"$(gofmt -l -w .     | tee /dev/stderr)\"\n# test -z \"$(goimports -l -w . | tee /dev/stderr)\"\n# test -z \"$(golint .          | tee /dev/stderr)\"\ngodep go vet $PROJECTS\ngodep go test -race $PROJECTS\n\n# Run test coverage on each subdirectories and merge the coverage profile.\n\necho \"mode: count\" > profile.cov\n\n# Standard go tooling behavior is to ignore dirs with leading underscors\nfor dir in $PROJECTS\ndo\nif ls $dir/*.go &> /dev/null; then\n    godep go test -covermode=count -coverprofile=$dir/profile.tmp $dir\n    if [ -f $dir/profile.tmp ]\n    then\n        cat $dir/profile.tmp | tail -n +2 >> profile.cov\n        rm $dir/profile.tmp\n    fi\nfi\ndone\n\ngodep go tool cover -func profile.cov\n\n# This is breaking travis-ci. Disabling it for now.\n# [ ${COVERALLS_TOKEN} ] && goveralls -coverprofile=profile.cov -service travis-ci -repotoken $COVERALLS_TOKEN\n"
  },
  {
    "path": "imageprocessor/compresslosslessly.go",
    "content": "package imageprocessor\n\nimport (\n\t\"errors\"\n\n\t\"github.com/Imgur/mandible/imageprocessor/processorcommand\"\n\t\"github.com/Imgur/mandible/uploadedfile\"\n)\n\ntype CompressLosslessly struct{}\n\nfunc (this *CompressLosslessly) Process(image *uploadedfile.UploadedFile) error {\n\tif image.IsJpeg() {\n\t\treturn this.compressJpeg(image)\n\t}\n\n\tif image.IsPng() {\n\t\treturn this.compressPng(image)\n\t}\n\n\tif image.IsGif() {\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"Unsuported filetype\")\n}\n\nfunc (this *CompressLosslessly) String() string {\n\treturn \"Lossy compressor\"\n}\n\nfunc (this *CompressLosslessly) compressPng(image *uploadedfile.UploadedFile) error {\n\tfilename, err := processorcommand.Optipng(image.GetPath())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\timage.SetPath(filename)\n\n\treturn nil\n}\n\nfunc (this *CompressLosslessly) compressJpeg(image *uploadedfile.UploadedFile) error {\n\tfilename, err := processorcommand.Jpegtran(image.GetPath())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\timage.SetPath(filename)\n\n\treturn nil\n}\n"
  },
  {
    "path": "imageprocessor/exifstripper.go",
    "content": "package imageprocessor\n\nimport (\n\t\"github.com/Imgur/mandible/imageprocessor/processorcommand\"\n\t\"github.com/Imgur/mandible/uploadedfile\"\n)\n\ntype ExifStripper struct{}\n\nfunc (this *ExifStripper) Process(image *uploadedfile.UploadedFile) error {\n\tif !image.IsJpeg() {\n\t\treturn nil\n\t}\n\n\terr := processorcommand.StripMetadata(image.GetPath())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (this *ExifStripper) String() string {\n\treturn \"EXIF stripper\"\n}\n"
  },
  {
    "path": "imageprocessor/imageorienter.go",
    "content": "package imageprocessor\n\nimport (\n\t\"github.com/Imgur/mandible/imageprocessor/processorcommand\"\n\t\"github.com/Imgur/mandible/uploadedfile\"\n)\n\ntype ImageOrienter struct{}\n\nfunc (this *ImageOrienter) Process(image *uploadedfile.UploadedFile) error {\n\tfilename, err := processorcommand.FixOrientation(image.GetPath())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\timage.SetPath(filename)\n\n\treturn nil\n}\n\nfunc (this *ImageOrienter) String() string {\n\treturn \"Image orienter\"\n}\n"
  },
  {
    "path": "imageprocessor/imageprocessor.go",
    "content": "package imageprocessor\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/Imgur/mandible/config\"\n\t\"github.com/Imgur/mandible/uploadedfile\"\n)\n\ntype ProcessType interface {\n\tProcess(image *uploadedfile.UploadedFile) error\n\tString() string\n}\n\ntype multiProcessType []ProcessType\n\nfunc (this multiProcessType) Process(image *uploadedfile.UploadedFile) error {\n\tfor _, processor := range this {\n\t\terr := processor.Process(image)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error multiprocessing on %s: %s\", processor.String(), err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (this multiProcessType) String() string {\n\tprocesses := make([]string, 0)\n\tfor _, p := range this {\n\t\tprocesses = append(processes, p.String())\n\t}\n\treturn \"Multiple processes <\" + strings.Join(processes, \", \") + \">\"\n}\n\ntype asyncProcessType []ProcessType\n\nfunc (this asyncProcessType) Process(image *uploadedfile.UploadedFile) error {\n\terrs := make(chan error, len(this))\n\n\tfor _, processor := range this {\n\t\tgo func(p ProcessType) {\n\t\t\terr := p.Process(image)\n\t\t\tif err != nil {\n\t\t\t\terrs <- fmt.Errorf(\"Error asynchronously processing on %s: %s\", p.String(), err.Error())\n\t\t\t} else {\n\t\t\t\terrs <- nil\n\t\t\t}\n\t\t}(processor)\n\t}\n\n\tfor i := 0; i < len(this); i++ {\n\t\tselect {\n\t\tcase err := <-errs:\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (this asyncProcessType) String() string {\n\tprocesses := make([]string, 0)\n\tfor _, p := range this {\n\t\tprocesses = append(processes, p.String())\n\t}\n\treturn \"Async processes <\" + strings.Join(processes, \", \") + \">\"\n}\n\ntype ImageProcessor struct {\n\tprocessor ProcessType\n}\n\nfunc (this *ImageProcessor) Run(image *uploadedfile.UploadedFile) error {\n\treturn this.processor.Process(image)\n}\n\ntype ImageProcessorStrategy func(*config.Configuration, *uploadedfile.UploadedFile) (*ImageProcessor, 
error)\n\n// Just do nothing to the file after it's uploaded...\nvar PassthroughStrategy = func(cfg *config.Configuration, file *uploadedfile.UploadedFile) (*ImageProcessor, error) {\n\treturn &ImageProcessor{multiProcessType{}}, nil\n}\n\nvar ThumbnailStrategy = func(cfg *config.Configuration, file *uploadedfile.UploadedFile) (*ImageProcessor, error) {\n\tprocessor := asyncProcessType{}\n\tfor _, t := range file.GetThumbs() {\n\t\tprocessor = append(processor, t)\n\t}\n\n\treturn &ImageProcessor{processor}, nil\n}\n\nvar EverythingStrategy = func(cfg *config.Configuration, file *uploadedfile.UploadedFile) (*ImageProcessor, error) {\n\tsize, err := file.FileSize()\n\tif err != nil {\n\t\treturn &ImageProcessor{}, err\n\t}\n\n\tprocessor := multiProcessType{}\n\tprocessor = append(processor, &ImageOrienter{})\n\tprocessor = append(processor, &CompressLosslessly{})\n\tprocessor = append(processor, &ExifStripper{})\n\n\tif size > cfg.MaxFileSize {\n\t\tprocessor = append(processor, &ImageScaler{cfg.MaxFileSize})\n\t}\n\n\tasync := asyncProcessType{}\n\n\tasync = append(async, DuelOCRStratagy())\n\tfor _, t := range file.GetThumbs() {\n\t\tasync = append(async, t)\n\t}\n\n\tif len(async) > 0 {\n\t\tprocessor = append(processor, async)\n\t}\n\n\treturn &ImageProcessor{processor}, nil\n}\n"
  },
  {
    "path": "imageprocessor/imagescaler.go",
    "content": "package imageprocessor\n\nimport (\n\t\"errors\"\n\n\t\"github.com/Imgur/mandible/imageprocessor/processorcommand\"\n\t\"github.com/Imgur/mandible/uploadedfile\"\n)\n\ntype ImageScaler struct {\n\ttargetSize int64\n}\n\nfunc (this *ImageScaler) Process(image *uploadedfile.UploadedFile) error {\n\tswitch image.GetMime() {\n\tcase \"image/jpeg\", \"image/jpg\":\n\t\treturn this.scaleJpeg(image)\n\tcase \"image/png\":\n\t\treturn this.scalePng(image)\n\tcase \"image/gif\":\n\t\treturn this.scaleGif(image)\n\t}\n\n\treturn errors.New(\"Unsuported filetype\")\n}\n\nfunc (this *ImageScaler) String() string {\n\treturn \"Image scaler\"\n}\n\nfunc (this *ImageScaler) scalePng(image *uploadedfile.UploadedFile) error {\n\tfilename, err := processorcommand.ConvertToJpeg(image.GetPath())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\timage.SetPath(filename)\n\timage.SetMime(\"image/jpeg\")\n\treturn this.scaleJpeg(image)\n}\n\nfunc (this *ImageScaler) scaleJpeg(image *uploadedfile.UploadedFile) error {\n\tfilename, err := processorcommand.Quality(image.GetPath(), 90)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\timage.SetPath(filename)\n\tsize, err := image.FileSize()\n\tif size < this.targetSize {\n\t\treturn nil\n\t}\n\n\tfilename, err = processorcommand.Quality(image.GetPath(), 70)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\timage.SetPath(filename)\n\tsize, err = image.FileSize()\n\tif size < this.targetSize {\n\t\treturn nil\n\t}\n\n\tpercent := 90\n\tif (size - this.targetSize) >= (15 * 1024 * 1024) {\n\t\tpercent = 30\n\t} else if (size - this.targetSize) >= (10 * 1024 * 1024) {\n\t\tpercent = 40\n\t} else if (size - this.targetSize) >= (5 * 1024 * 1024) {\n\t\tpercent = 60\n\t}\n\n\tfor {\n\t\tfilename, err = processorcommand.ResizePercent(image.GetPath(), percent)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\timage.SetPath(filename)\n\t\tsize, err := image.FileSize()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else if size == 0 || percent < 10 
{\n\t\t\treturn errors.New(\"Could not scale image to desired filesize\")\n\t\t} else if size < this.targetSize {\n\t\t\treturn nil\n\t\t}\n\n\t\tpercent -= 10\n\t}\n}\n\nfunc (this *ImageScaler) scaleGif(image *uploadedfile.UploadedFile) error {\n\treturn errors.New(\"Unimplimented\")\n}\n"
  },
  {
    "path": "imageprocessor/ocr.go",
    "content": "package imageprocessor\n\nimport (\n\t\"github.com/Imgur/mandible/imageprocessor/processorcommand\"\n\t\"github.com/Imgur/mandible/uploadedfile\"\n\n\t\"log\"\n)\n\ntype OCRRunner struct {\n\tCommand processorcommand.OCRCommand\n}\n\nfunc (this *OCRRunner) Process(image *uploadedfile.UploadedFile) error {\n\tresult, err := this.Command.Run(image.GetPath())\n\tif err != nil {\n\t\tlog.Printf(\"Error running OCR: %s\", err.Error())\n\t\treturn err\n\t}\n\n\timage.SetOCRText(result.Text)\n\n\treturn nil\n}\n\nfunc (this *OCRRunner) String() string {\n\treturn \"OCR runner\"\n}\n\nvar DuelOCRStratagy = func() *OCRRunner {\n\tmulti := processorcommand.MultiOCRCommand{}\n\tmulti = append(multi, processorcommand.NewMemeOCR())\n\tmulti = append(multi, processorcommand.NewStandardOCR())\n\n\treturn &OCRRunner{multi}\n}\n\nvar StandardOCRStratagy = func() *OCRRunner {\n\treturn &OCRRunner{processorcommand.NewStandardOCR()}\n}\n\nvar MemeOCRStratagy = func() *OCRRunner {\n\treturn &OCRRunner{processorcommand.NewMemeOCR()}\n}\n"
  },
  {
    "path": "imageprocessor/ocr_test.go",
    "content": "package imageprocessor\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/Imgur/mandible/uploadedfile\"\n)\n\nfunc TestStandardOCR(t *testing.T) {\n\timage, err := getUploadedFileObject()\n\tif err != nil {\n\t\tt.Fatalf(\"Could not initialize standard OCR test\")\n\t}\n\tdefer image.Clean()\n\n\tocrStratagy := StandardOCRStratagy()\n\tocrStratagy.Process(image)\n\n\tif image.GetOCRText() != \"hello\" {\n\t\tt.Fatalf(\"Did not get proper standard OCR text back %s != hello\", image.GetOCRText())\n\t}\n}\n\nfunc getUploadedFileObject() (*uploadedfile.UploadedFile, error) {\n\tfilename, err := copyTestImage(\"testdata/ocrtestimage.png\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\timage, err := uploadedfile.NewUploadedFile(\"ocrtestimage.png\", filename, nil)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Could not initialize standard OCR test\")\n\t}\n\n\treturn image, nil\n}\n\nfunc copyTestImage(filename string) (string, error) {\n\tuploadFile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer uploadFile.Close()\n\n\ttmpFile, err := ioutil.TempFile(os.TempDir(), \"image\")\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Unable to write to /tmp\")\n\t}\n\tdefer tmpFile.Close()\n\n\t_, err = io.Copy(tmpFile, uploadFile)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn tmpFile.Name(), nil\n}\n"
  },
  {
    "path": "imageprocessor/processorcommand/gm.go",
    "content": "package processorcommand\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/Imgur/mandible/imageprocessor/thumbType\"\n)\n\nconst GM_COMMAND = \"gm\"\n\nfunc ConvertToJpeg(filename string) (string, error) {\n\toutfile := fmt.Sprintf(\"%s_jpg\", filename)\n\n\targs := []string{\n\t\t\"convert\",\n\t\tfilename,\n\t\t\"-flatten\",\n\t\t\"JPEG:\" + outfile,\n\t}\n\n\terr := runProcessorCommand(GM_COMMAND, args)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn outfile, nil\n}\n\nfunc FixOrientation(filename string) (string, error) {\n\toutfile := fmt.Sprintf(\"%s_ort\", filename)\n\n\targs := []string{\n\t\t\"convert\",\n\t\tfilename,\n\t\t\"-auto-orient\",\n\t\toutfile,\n\t}\n\n\terr := runProcessorCommand(GM_COMMAND, args)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn outfile, nil\n}\n\nfunc Quality(filename string, quality int) (string, error) {\n\toutfile := fmt.Sprintf(\"%s_q\", filename)\n\n\targs := []string{\n\t\t\"convert\",\n\t\tfilename,\n\t\t\"-quality\",\n\t\tfmt.Sprintf(\"%d\", quality),\n\t\t\"-density\",\n\t\t\"72x72\",\n\t\toutfile,\n\t}\n\n\terr := runProcessorCommand(GM_COMMAND, args)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn outfile, nil\n}\n\nfunc ResizePercent(filename string, percent int) (string, error) {\n\toutfile := fmt.Sprintf(\"%s_rp\", filename)\n\n\targs := []string{\n\t\t\"convert\",\n\t\tfilename,\n\t\t\"-resize\",\n\t\tfmt.Sprintf(\"%d%%\", percent),\n\t\toutfile,\n\t}\n\n\terr := runProcessorCommand(GM_COMMAND, args)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn outfile, nil\n}\n\nfunc SquareThumb(filename, name string, size int, quality int, format thumbType.ThumbType) (string, error) {\n\toutfile := fmt.Sprintf(\"%s_%s\", filename, name)\n\n\targs := []string{\n\t\t\"convert\",\n\t\tfmt.Sprintf(\"%s[0]\", filename),\n\t\t\"-resize\",\n\t\tfmt.Sprintf(\"%dx%d^\", size, size),\n\t\t\"-gravity\",\n\t\t\"center\",\n\t\t\"-crop\",\n\t\tfmt.Sprintf(\"%dx%d+0+0\", size, 
size),\n\t\t\"-density\",\n\t\t\"72x72\",\n\t\t\"-unsharp\",\n\t\t\"0.5\",\n\t}\n\n\tif quality >= 0 {\n\t\targs = append(args,\n\t\t\t\"-quality\",\n\t\t\tfmt.Sprintf(\"%d\", quality),\n\t\t)\n\t}\n\n\targs = append(args, fmt.Sprintf(\"%s:%s\", format.ToString(), outfile))\n\n\terr := runProcessorCommand(GM_COMMAND, args)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn outfile, nil\n}\n\nfunc Thumb(filename, name string, width, height int, quality int, format thumbType.ThumbType) (string, error) {\n\toutfile := fmt.Sprintf(\"%s_%s\", filename, name)\n\n\targs := []string{\n\t\t\"convert\",\n\t\tfmt.Sprintf(\"%s[0]\", filename),\n\t\t\"-resize\",\n\t\tfmt.Sprintf(\"%dx%d>\", width, height),\n\t\t\"-density\",\n\t\t\"72x72\",\n\t}\n\n\tif quality >= 0 {\n\t\targs = append(args,\n\t\t\t\"-quality\",\n\t\t\tfmt.Sprintf(\"%d\", quality),\n\t\t)\n\t}\n\n\targs = append(args, fmt.Sprintf(\"%s:%s\", format.ToString(), outfile))\n\n\terr := runProcessorCommand(GM_COMMAND, args)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn outfile, nil\n}\n\nfunc CircleThumb(filename, name string, width int, quality int, format thumbType.ThumbType) (string, error) {\n\toutfile := fmt.Sprintf(\"%s_%s\", filename, name)\n\n\tfilename, err := SquareThumb(filename, name, width, quality, format)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\targs := []string{\n\t\t\"convert\",\n\t\t\"-size\",\n\t\tfmt.Sprintf(\"%dx%d\", width, width),\n\t\t\"xc:none\",\n\t\t\"-fill\",\n\t\tfilename,\n\t\t\"-quality\",\n\t\t\"83\",\n\t\t\"-density\",\n\t\t\"72x72\",\n\t\t\"-draw\",\n\t\tfmt.Sprintf(\"circle %d,%d %d,1\", width/2, width/2, width/2),\n\t}\n\n\tif quality >= 0 {\n\t\targs = append(args,\n\t\t\t\"-quality\",\n\t\t\tfmt.Sprintf(\"%d\", quality),\n\t\t)\n\t}\n\n\targs = append(args, fmt.Sprintf(\"PNG:%s\", outfile))\n\n\terr = runProcessorCommand(GM_COMMAND, args)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn outfile, nil\n}\n\nfunc CustomThumb(filename, name 
string, width, height int, cropGravity string, cropWidth, cropHeight, quality int, format thumbType.ThumbType) (string, error) {\n\toutfile := fmt.Sprintf(\"%s_%s\", filename, name)\n\n\targs := []string{\n\t\t\"convert\",\n\t\tfmt.Sprintf(\"%s[0]\", filename),\n\t\t\"-resize\",\n\t\tfmt.Sprintf(\"%dx%d^\", width, height),\n\t\t\"-density\",\n\t\t\"72x72\",\n\t}\n\n\tif quality != -1 {\n\t\targs = append(args,\n\t\t\t\"-quality\",\n\t\t\tfmt.Sprintf(\"%d\", quality),\n\t\t)\n\t}\n\n\tif cropGravity != \"\" {\n\t\targs = append(args,\n\t\t\t\"-gravity\",\n\t\t\tfmt.Sprintf(\"%s\", cropGravity),\n\t\t\t\"-crop\",\n\t\t\tfmt.Sprintf(\"%dx%d+0+0\", cropWidth, cropHeight),\n\t\t)\n\t}\n\n\targs = append(args, fmt.Sprintf(\"%s:%s\", format.ToString(), outfile))\n\terr := runProcessorCommand(GM_COMMAND, args)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn outfile, nil\n}\n\nfunc Full(filename string, name string, quality int, format thumbType.ThumbType) (string, error) {\n\toutfile := fmt.Sprintf(\"%s_%s\", filename, name)\n\n\targs := []string{\n\t\t\"convert\",\n\t\tfmt.Sprintf(\"%s[0]\", filename),\n\t\t\"-density\",\n\t\t\"72x72\",\n\t}\n\n\tif quality >= 0 {\n\t\targs = append(args,\n\t\t\t\"-quality\",\n\t\t\tfmt.Sprintf(\"%d\", quality),\n\t\t)\n\t}\n\n\targs = append(args, fmt.Sprintf(\"%s:%s\", format.ToString(), outfile))\n\n\terr := runProcessorCommand(GM_COMMAND, args)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn outfile, nil\n}\n"
  },
  {
    "path": "imageprocessor/processorcommand/jpegtran.go",
    "content": "package processorcommand\n\nimport (\n\t\"fmt\"\n)\n\nfunc Jpegtran(filename string) (string, error) {\n\toutfile := fmt.Sprintf(\"%s_opti\", filename)\n\n\targs := []string{\n\t\t\"-copy\",\n\t\t\"all\",\n\t\t\"-optimize\",\n\t\t\"-outfile\",\n\t\toutfile,\n\t\tfilename,\n\t}\n\n\terr := runProcessorCommand(\"jpegtran\", args)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn outfile, nil\n}\n"
  },
  {
    "path": "imageprocessor/processorcommand/ocrcommands.go",
    "content": "package processorcommand\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com/trustmaster/go-aspell\"\n)\n\ntype OCRResult struct {\n\tType string\n\tText string\n}\n\nfunc newOCRResult(ocrType string, result string) *OCRResult {\n\treturn &OCRResult{\n\t\tocrType,\n\t\tresult,\n\t}\n}\n\nfunc (this *OCRResult) removeNonWords() {\n\tblob := this.Text\n\n\tspeller, err := aspell.NewSpeller(map[string]string{\n\t\t\"lang\": \"en_US\",\n\t})\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %s\", err.Error())\n\t\treturn\n\t}\n\tdefer speller.Delete()\n\n\tsingleCharWords := regexp.MustCompile(\"(a|i)\")\n\tnumberRegex := regexp.MustCompile(\"\\\\d{3,}\")\n\twordRegexp := regexp.MustCompile(\"\\\\b(\\\\w+)\\\\b\")\n\twords := wordRegexp.FindAllString(blob, -1)\n\n\tstr := \"\"\n\n\tfor _, word := range words {\n\t\tif numberRegex.MatchString(word) {\n\t\t\tstr += \" \" + word\n\t\t} else if len(word) == 1 {\n\t\t\tif singleCharWords.MatchString(word) {\n\t\t\t\tstr += \" \" + word\n\t\t\t}\n\t\t} else if speller.Check(word) {\n\t\t\tstr += \" \" + word\n\t\t}\n\t}\n\n\tthis.Text = strings.TrimSpace(str)\n}\n\nfunc (this *OCRResult) wordCount(blob string) int {\n\tword_regexp := regexp.MustCompile(\"\\\\b(\\\\w+)\\\\b\")\n\twords := word_regexp.FindAllString(blob, -1)\n\n\t// don't let single char words count towards the overal word count. 
Gets thrown off by poor OCR results\n\tcount := 0\n\tfor _, word := range words {\n\t\tif len(word) > 1 {\n\t\t\tcount++\n\t\t}\n\t}\n\n\treturn count\n}\n\ntype MultiOCRCommand []OCRCommand\n\nfunc (this MultiOCRCommand) Run(image string) (*OCRResult, error) {\n\tresults := make(chan *OCRResult, len(this))\n\terrs := make(chan error, len(this))\n\n\tfor _, command := range this {\n\t\tgo func(c OCRCommand) {\n\t\t\tk, err := c.Run(image)\n\t\t\tif err != nil {\n\t\t\t\terrs <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresults <- k\n\t\t}(command)\n\t}\n\n\tmax := -1\n\tvar best *OCRResult\n\n\tfor i := 0; i < len(this); i++ {\n\t\tselect {\n\t\tcase result := <-results:\n\t\t\tresult.removeNonWords()\n\t\t\tcount := result.wordCount(result.Text)\n\n\t\t\tif count > max {\n\t\t\t\tbest = result\n\t\t\t\tmax = count\n\t\t\t}\n\n\t\tcase err := <-errs:\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Return the average, same as before.\n\treturn best, nil\n}\n\ntype OCRCommand interface {\n\tRun(image string) (*OCRResult, error)\n}\n\ntype MemeOCR struct {\n\tname string\n}\n\nfunc NewMemeOCR() *MemeOCR {\n\treturn &MemeOCR{\n\t\t\"MemeOCR\",\n\t}\n}\n\nfunc (this *MemeOCR) Run(image string) (*OCRResult, error) {\n\timageTif := fmt.Sprintf(\"%s_meme.jpg\", image)\n\toutText := fmt.Sprintf(\"%s_meme\", image)\n\tinImage := fmt.Sprintf(\"%s[0]\", image)\n\tpreprocessingArgs := []string{\"convert\", inImage, \"-resize\", \"400%\", \"-fill\", \"black\", \"-fuzz\", \"10%\", \"+matte\", \"-matte\", \"-transparent\", \"white\", imageTif}\n\ttesseractArgs := []string{\"-l\", \"meme\", imageTif, outText}\n\n\terr := runProcessorCommand(GM_COMMAND, preprocessingArgs)\n\tif err != nil {\n\t\treturn nil, errors.New(fmt.Sprintf(\"Meme preprocessing command failed with error = %v\", err))\n\t}\n\tdefer os.Remove(imageTif)\n\n\terr = runProcessorCommand(\"tesseract\", tesseractArgs)\n\tif err != nil {\n\t\treturn nil, errors.New(fmt.Sprintf(\"Meme tesseract command failed with error = %v\", 
err))\n\t}\n\tdefer os.Remove(outText + \".txt\")\n\n\ttext, err := ioutil.ReadFile(outText + \".txt\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := strings.ToLower(strings.TrimSpace(string(text[:])))\n\n\treturn newOCRResult(this.name, result), nil\n}\n\ntype StandardOCR struct {\n\tname string\n}\n\nfunc NewStandardOCR() *StandardOCR {\n\treturn &StandardOCR{\n\t\t\"StandardOCR\",\n\t}\n}\n\nfunc (this *StandardOCR) Run(image string) (*OCRResult, error) {\n\timageTif := fmt.Sprintf(\"%s_standard.jpg\", image)\n\toutText := fmt.Sprintf(\"%s_standard\", image)\n\tinImage := fmt.Sprintf(\"%s[0]\", image)\n\tpreprocessingArgs := []string{\"convert\", inImage, \"-resize\", \"400%\", \"-type\", \"Grayscale\", imageTif}\n\ttesseractArgs := []string{\"-l\", \"eng\", imageTif, outText}\n\n\terr := runProcessorCommand(GM_COMMAND, preprocessingArgs)\n\tif err != nil {\n\t\treturn nil, errors.New(fmt.Sprintf(\"Standard preprocessing command failed with error = %v\", err))\n\t}\n\tdefer os.Remove(imageTif)\n\n\terr = runProcessorCommand(\"tesseract\", tesseractArgs)\n\tif err != nil {\n\t\treturn nil, errors.New(fmt.Sprintf(\"Standard tesseract command failed with error = %v\", err))\n\t}\n\tdefer os.Remove(outText + \".txt\")\n\n\ttext, err := ioutil.ReadFile(outText + \".txt\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := strings.ToLower(strings.TrimSpace(string(text[:])))\n\n\treturn newOCRResult(this.name, result), nil\n}\n"
  },
  {
    "path": "imageprocessor/processorcommand/optipng.go",
    "content": "package processorcommand\n\nimport (\n\t\"fmt\"\n)\n\nfunc Optipng(filename string) (string, error) {\n\toutfile := fmt.Sprintf(\"%s_opi\", filename)\n\n\targs := []string{\n\t\t\"-fix\",\n\t\t\"-out\",\n\t\toutfile,\n\t\tfilename,\n\t}\n\n\terr := runProcessorCommand(\"optipng\", args)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn outfile, nil\n}\n"
  },
  {
    "path": "imageprocessor/processorcommand/runner.go",
    "content": "package processorcommand\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"log\"\n\t\"os/exec\"\n\t\"time\"\n)\n\nfunc runProcessorCommand(command string, args []string) error {\n\tcmd := exec.Command(command, args...)\n\n\tvar out bytes.Buffer\n\tvar stderr bytes.Buffer\n\tcmd.Stdout = &out\n\tcmd.Stderr = &stderr\n\n\tcmd.Start()\n\n\tcmdDone := make(chan error, 1)\n\tgo func() {\n\t\tcmdDone <- cmd.Wait()\n\t}()\n\n\tselect {\n\tcase <-time.After(time.Duration(60) * time.Second):\n\t\tkillCmd(cmd)\n\t\t<-cmdDone\n\t\treturn errors.New(\"Command timed out\")\n\tcase err := <-cmdDone:\n\t\tif err != nil {\n\t\t\tlog.Println(stderr.String())\n\t\t}\n\n\t\treturn err\n\t}\n}\n\nfunc killCmd(cmd *exec.Cmd) {\n\tif err := cmd.Process.Kill(); err != nil {\n\t\tlog.Printf(\"Failed to kill command: %v\", err)\n\t}\n}\n"
  },
  {
    "path": "imageprocessor/processorcommand/stripmetadata.go",
    "content": "package processorcommand\n\nfunc StripMetadata(filename string) error {\n\targs := []string{\n\t\t\"-all=\",\n\t\t\"--icc_profile:all\",\n\t\t\"-overwrite_original\",\n\t\tfilename,\n\t}\n\n\terr := runProcessorCommand(\"exiftool\", args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "imageprocessor/thumbType/thumbType.go",
    "content": "package thumbType\n\ntype ThumbType int\n\nconst (\n\tUNKNOWN ThumbType = iota\n\tJPG\n\tPNG\n\tGIF\n\tWEBP\n)\n\nfunc (this ThumbType) ToString() string {\n\tswitch this {\n\tcase JPG:\n\t\treturn \"JPG\"\n\tcase PNG:\n\t\treturn \"PNG\"\n\tcase GIF:\n\t\treturn \"GIF\"\n\tcase WEBP:\n\t\treturn \"WEBP\"\n\tdefault:\n\t\treturn \"UNKNOWN\"\n\t}\n}\n\nfunc FromMime(mime string) ThumbType {\n\tswitch mime {\n\tcase \"image/jpeg\":\n\t\treturn JPG\n\tcase \"image/png\":\n\t\treturn PNG\n\tcase \"image/gif\":\n\t\treturn GIF\n\tcase \"image/webp\":\n\t\treturn WEBP\n\tdefault:\n\t\treturn UNKNOWN\n\t}\n}\n\nfunc FromString(format string) ThumbType {\n\tswitch format {\n\tcase \"jpg\":\n\t\treturn JPG\n\tcase \"jpeg\":\n\t\treturn JPG\n\tcase \"png\":\n\t\treturn PNG\n\tcase \"gif\":\n\t\treturn GIF\n\tcase \"webp\":\n\t\treturn WEBP\n\tdefault:\n\t\treturn UNKNOWN\n\t}\n}\n"
  },
  {
    "path": "imagestore/factory.go",
    "content": "package imagestore\n\nimport (\n\t\"io/ioutil\"\n\t\"log\"\n\n\t\"github.com/Imgur/mandible/config\"\n\t\"github.com/mitchellh/goamz/aws\"\n\t\"github.com/mitchellh/goamz/s3\"\n\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/google\"\n\tgcloud \"google.golang.org/cloud\"\n\tgcs \"google.golang.org/cloud/storage\"\n)\n\ntype Factory struct {\n\tconf *config.Configuration\n}\n\nfunc NewFactory(conf *config.Configuration) *Factory {\n\treturn &Factory{conf}\n}\n\nfunc (this *Factory) NewImageStores() ImageStore {\n\tstores := MultiImageStore{}\n\tvar store ImageStore\n\n\tfor _, configWrapper := range this.conf.Stores {\n\t\tswitch configWrapper[\"Type\"] {\n\t\tcase \"s3\":\n\t\t\tstore = this.NewS3ImageStore(configWrapper)\n\t\t\tstores = append(stores, store)\n\t\tcase \"gcs\":\n\t\t\tstore = this.NewGCSImageStore(configWrapper)\n\t\t\tstores = append(stores, store)\n\t\tcase \"local\":\n\t\t\tstore = this.NewLocalImageStore(configWrapper)\n\t\t\tstores = append(stores, store)\n\t\tcase \"memory\":\n\t\t\tstore = NewInMemoryImageStore()\n\t\t\tstores = append(stores, store)\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unsupported store %s\", configWrapper[\"Type\"])\n\t\t}\n\t}\n\n\tif len(this.conf.Stores) == 1 {\n\t\treturn store\n\t}\n\n\t// return a MultiImageStore type if more then 1 store was specified in the config\n\treturn stores\n}\n\nfunc (this *Factory) NewS3ImageStore(conf map[string]string) ImageStore {\n\tbucket := conf[\"BucketName\"]\n\n\tauth, err := aws.GetAuth(conf[\"AWSKey\"], conf[\"AWSSecret\"])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tclient := s3.New(auth, aws.Regions[conf[\"Region\"]])\n\tmapper := NewNamePathMapper(conf[\"NamePathRegex\"], conf[\"NamePathMap\"])\n\n\treturn NewS3ImageStore(\n\t\tbucket,\n\t\tconf[\"StoreRoot\"],\n\t\tclient,\n\t\tmapper,\n\t)\n}\n\nfunc (this *Factory) NewGCSImageStore(conf map[string]string) ImageStore {\n\tjsonKey, err := ioutil.ReadFile(conf[\"KeyFile\"])\n\tif err != nil 
{\n\t\tlog.Fatal(err)\n\t}\n\tcloudConf, err := google.JWTConfigFromJSON(\n\t\tjsonKey,\n\t\tgcs.ScopeFullControl,\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tbucket := conf[\"BucketName\"]\n\n\tctx := gcloud.NewContext(conf[\"AppID\"], cloudConf.Client(oauth2.NoContext))\n\tmapper := NewNamePathMapper(conf[\"NamePathRegex\"], conf[\"NamePathMap\"])\n\n\treturn NewGCSImageStore(\n\t\tctx,\n\t\tbucket,\n\t\tconf[\"StoreRoot\"],\n\t\tmapper,\n\t)\n}\n\nfunc (this *Factory) NewLocalImageStore(conf map[string]string) ImageStore {\n\tmapper := NewNamePathMapper(conf[\"NamePathRegex\"], conf[\"NamePathMap\"])\n\treturn NewLocalImageStore(conf[\"StoreRoot\"], mapper)\n}\n\nfunc (this *Factory) NewStoreObject(id string, mime string, size string) *StoreObject {\n\treturn &StoreObject{\n\t\tId:       id,\n\t\tMimeType: mime,\n\t\tSize:     size,\n\t}\n}\n\nfunc (this *Factory) NewHashGenerator(store ImageStore) *HashGenerator {\n\thashGen := &HashGenerator{\n\t\tmake(chan string),\n\t\tthis.conf.HashLength,\n\t\tstore,\n\t}\n\n\thashGen.init()\n\treturn hashGen\n}\n"
  },
  {
    "path": "imagestore/gcsstore.go",
    "content": "package imagestore\n\nimport (\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/cloud/storage\"\n)\n\ntype GCSImageStore struct {\n\tctx            context.Context\n\tbucketName     string\n\tstoreRoot      string\n\tnamePathMapper *NamePathMapper\n}\n\nfunc NewGCSImageStore(ctx context.Context, bucket string, root string, mapper *NamePathMapper) *GCSImageStore {\n\treturn &GCSImageStore{\n\t\tctx:            ctx,\n\t\tbucketName:     bucket,\n\t\tstoreRoot:      root,\n\t\tnamePathMapper: mapper,\n\t}\n}\n\nfunc (this *GCSImageStore) Exists(obj *StoreObject) (bool, error) {\n\t_, err := storage.StatObject(this.ctx, this.bucketName, this.toPath(obj))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\nfunc (this *GCSImageStore) Save(src string, obj *StoreObject) (*StoreObject, error) {\n\tsrcFd, err := os.Open(src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer srcFd.Close()\n\n\tdata, err := ioutil.ReadAll(srcFd)\n\tif err != nil {\n\t\tlog.Printf(\"error on read file: %s\", err)\n\t\treturn nil, err\n\t}\n\n\twc := storage.NewWriter(this.ctx, this.bucketName, this.toPath(obj))\n\twc.ContentType = obj.MimeType\n\tif _, err := wc.Write(data); err != nil {\n\t\tlog.Printf(\"error on write data: %s\", err)\n\t\treturn nil, err\n\t}\n\tif err := wc.Close(); err != nil {\n\t\tlog.Printf(\"error on close writer: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tobj.Url = \"https://storage.googleapis.com/\" + this.bucketName + \"/\" + this.toPath(obj)\n\treturn obj, nil\n}\n\nfunc (this *GCSImageStore) Get(obj *StoreObject) (io.ReadCloser, error) {\n\treader, err := storage.NewReader(this.ctx, this.bucketName, this.toPath(obj))\n\tif err != nil {\n\t\tlog.Printf(\"error on read file: %s\", err)\n\t\treturn nil, err\n\t}\n\n\treturn reader, nil\n}\n\nfunc (this *GCSImageStore) String() string {\n\treturn \"GCSStore\"\n}\n\nfunc (this *GCSImageStore) toPath(obj *StoreObject) string 
{\n\tif this.storeRoot != \"\" {\n\t\treturn this.storeRoot + \"/\" + this.namePathMapper.mapToPath(obj)\n\t}\n\treturn this.namePathMapper.mapToPath(obj)\n}\n"
  },
  {
    "path": "imagestore/hash.go",
    "content": "package imagestore\n\nimport (\n\t\"crypto/rand\"\n\t\"log\"\n)\n\n// Provides a continuous stream of random image \"hashes\" of a fixed length that is unique (does not exist in the store).\ntype HashGenerator struct {\n\thashGetter chan string\n\tlength     int\n\tstore      ImageStore\n}\n\nfunc (this *HashGenerator) init() {\n\tgo func() {\n\t\tstoreObj := &StoreObject{\n\t\t\t\"\",\n\t\t\t\"\",\n\t\t\t\"original\",\n\t\t\t\"\",\n\t\t}\n\n\t\tfor {\n\t\t\tstr := \"\"\n\n\t\t\tfor len(str) < this.length {\n\t\t\t\tc := 10\n\t\t\t\tbArr := make([]byte, c)\n\t\t\t\t_, err := rand.Read(bArr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(\"error:\", err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tfor _, b := range bArr {\n\t\t\t\t\tif len(str) == this.length {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\n\t\t\t\t\t/**\n\t\t\t\t\t * Each byte will be in [0, 256), but we only care about:\n\t\t\t\t\t *\n\t\t\t\t\t * [48, 57]     0-9\n\t\t\t\t\t * [65, 90]     A-Z\n\t\t\t\t\t * [97, 122]    a-z\n\t\t\t\t\t *\n\t\t\t\t\t * Which means that the highest bit will always be zero, since the last byte with high bit\n\t\t\t\t\t * zero is 01111111 = 127 which is higher than 122. Lower our odds of having to re-roll a byte by\n\t\t\t\t\t * dividing by two (right bit shift of 1).\n\t\t\t\t\t */\n\n\t\t\t\t\tb = b >> 1\n\n\t\t\t\t\t// The byte is any of        0-9                  A-Z                      a-z\n\t\t\t\t\tbyteIsAllowable := (b >= 48 && b <= 57) || (b >= 65 && b <= 90) || (b >= 97 && b <= 122)\n\n\t\t\t\t\tif byteIsAllowable {\n\t\t\t\t\t\tstr += string(b)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tstoreObj.Id = str\n\n\t\t\texists, _ := this.store.Exists(storeObj)\n\t\t\tif !exists {\n\t\t\t\tthis.hashGetter <- str\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (this *HashGenerator) Get() string {\n\treturn <-this.hashGetter\n}\n"
  },
  {
    "path": "imagestore/localstore.go",
    "content": "package imagestore\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"path\"\n)\n\n// A LocalImageStore stores images on the local disk.\ntype LocalImageStore struct {\n\tstoreRoot      string\n\tnamePathMapper *NamePathMapper\n}\n\nfunc NewLocalImageStore(root string, mapper *NamePathMapper) *LocalImageStore {\n\treturn &LocalImageStore{\n\t\tstoreRoot:      root,\n\t\tnamePathMapper: mapper,\n\t}\n}\n\nfunc (this *LocalImageStore) Exists(obj *StoreObject) (bool, error) {\n\tif _, err := os.Stat(this.toPath(obj)); os.IsNotExist(err) {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\nfunc (this *LocalImageStore) Save(src string, obj *StoreObject) (*StoreObject, error) {\n\tsrcFd, err := os.Open(src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer srcFd.Close()\n\n\t// open output file\n\tthis.createParent(obj)\n\tfo, err := os.Create(this.toPath(obj))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer fo.Close()\n\n\t_, err = io.Copy(fo, srcFd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tobj.Url = this.toPath(obj)\n\treturn obj, nil\n}\n\nfunc (this *LocalImageStore) Get(obj *StoreObject) (io.ReadCloser, error) {\n\treader, err := os.Open(this.toPath(obj))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn reader, nil\n}\n\nfunc (this *LocalImageStore) String() string {\n\treturn \"LocalStore\"\n}\n\nfunc (this *LocalImageStore) createParent(obj *StoreObject) {\n\tpath := path.Dir(this.toPath(obj))\n\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\tos.MkdirAll(path, 0777)\n\t}\n}\n\nfunc (this *LocalImageStore) toPath(obj *StoreObject) string {\n\treturn this.storeRoot + \"/\" + this.namePathMapper.mapToPath(obj)\n}\n"
  },
  {
    "path": "imagestore/memorystore.go",
    "content": "package imagestore\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n)\n\ntype InMemoryImageStore struct {\n\tfiles map[string]string // name -> contents\n\trw    sync.Mutex\n}\n\nfunc NewInMemoryImageStore() *InMemoryImageStore {\n\treturn &InMemoryImageStore{\n\t\tfiles: make(map[string]string),\n\t\trw:    sync.Mutex{},\n\t}\n}\n\nfunc (this *InMemoryImageStore) Exists(obj *StoreObject) (bool, error) {\n\tthis.rw.Lock()\n\n\t_, ok := this.files[obj.Id]\n\n\tthis.rw.Unlock()\n\n\treturn ok, nil\n}\n\nfunc (this *InMemoryImageStore) Save(src string, obj *StoreObject) (*StoreObject, error) {\n\tsrcFd, err := os.Open(src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer srcFd.Close()\n\n\tdata, err := ioutil.ReadAll(srcFd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tthis.rw.Lock()\n\tthis.files[obj.Id] = string(data)\n\tthis.rw.Unlock()\n\n\treturn obj, nil\n}\n\nfunc (this *InMemoryImageStore) Get(obj *StoreObject) (io.ReadCloser, error) {\n\tthis.rw.Lock()\n\tdata, ok := this.files[obj.Id]\n\tthis.rw.Unlock()\n\n\tif !ok {\n\t\treturn nil, errors.New(\"File doesn't exist\")\n\t}\n\n\treader := strings.NewReader(data)\n\treadCloser := ioutil.NopCloser(reader)\n\treturn readCloser, nil\n}\n\nfunc (this *InMemoryImageStore) String() string {\n\treturn \"InMemoryStore\"\n}\n"
  },
  {
    "path": "imagestore/namepathmapper.go",
    "content": "package imagestore\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n)\n\ntype NamePathMapper struct {\n\tregex   *regexp.Regexp\n\treplace string\n}\n\nfunc NewNamePathMapper(expr string, mapping string) *NamePathMapper {\n\tvar r *regexp.Regexp\n\tif len(expr) > 0 {\n\t\tr = regexp.MustCompile(expr)\n\t}\n\n\treturn &NamePathMapper{\n\t\tr,\n\t\tmapping,\n\t}\n}\n\nfunc (this *NamePathMapper) mapToPath(obj *StoreObject) string {\n\trepl := strings.Replace(this.replace, \"${ImageName}\", obj.Id, -1)\n\trepl = strings.Replace(repl, \"${ImageSize}\", obj.Size, -1)\n\n\tif this.regex != nil {\n\t\treturn this.regex.ReplaceAllString(obj.Id, repl)\n\t}\n\n\treturn repl\n}\n"
  },
  {
    "path": "imagestore/s3store.go",
    "content": "package imagestore\n\nimport (\n\t\"io\"\n\t\"os\"\n\n\t\"github.com/mitchellh/goamz/s3\"\n)\n\ntype S3ImageStore struct {\n\tbucketName     string\n\tstoreRoot      string\n\tclient         *s3.S3\n\tnamePathMapper *NamePathMapper\n}\n\nfunc NewS3ImageStore(bucket string, root string, client *s3.S3, mapper *NamePathMapper) *S3ImageStore {\n\treturn &S3ImageStore{\n\t\tbucketName:     bucket,\n\t\tstoreRoot:      root,\n\t\tclient:         client,\n\t\tnamePathMapper: mapper,\n\t}\n}\n\nfunc (this *S3ImageStore) Exists(obj *StoreObject) (bool, error) {\n\tbucket := this.client.Bucket(this.bucketName)\n\tresponse, err := bucket.Head(this.toPath(obj))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn (response.StatusCode == 200), nil\n}\n\nfunc (this *S3ImageStore) Save(src string, obj *StoreObject) (*StoreObject, error) {\n\tsrcFd, err := os.Open(src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer srcFd.Close()\n\n\tbucket := this.client.Bucket(this.bucketName)\n\n\tstats, err := srcFd.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = bucket.PutReader(this.toPath(obj), srcFd, stats.Size(), obj.MimeType, s3.BucketOwnerFull)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tobj.Url = bucket.URL(this.toPath(obj))\n\treturn obj, nil\n}\n\nfunc (this *S3ImageStore) Get(obj *StoreObject) (io.ReadCloser, error) {\n\tbucket := this.client.Bucket(this.bucketName)\n\tdata, err := bucket.GetReader(this.toPath(obj))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data, nil\n}\n\nfunc (this *S3ImageStore) String() string {\n\treturn \"S3Store\"\n}\n\nfunc (this *S3ImageStore) toPath(obj *StoreObject) string {\n\treturn this.storeRoot + \"/\" + this.namePathMapper.mapToPath(obj)\n}\n"
  },
  {
    "path": "imagestore/store.go",
    "content": "package imagestore\n\nimport (\n\t\"fmt\"\n\t\"io\"\n)\n\ntype ImageStore interface {\n\tSave(src string, obj *StoreObject) (*StoreObject, error)\n\tExists(obj *StoreObject) (bool, error)\n\tGet(obj *StoreObject) (io.ReadCloser, error)\n\tString() string\n}\n\ntype MultiImageStore []ImageStore\n\nfunc (this MultiImageStore) Save(src string, obj *StoreObject) (*StoreObject, error) {\n\terrs := make(chan error, len(this))\n\n\tfor _, store := range this {\n\t\tgo func(s ImageStore) {\n\t\t\t_, err := s.Save(src, obj)\n\t\t\tif err != nil {\n\t\t\t\terrs <- fmt.Errorf(\"Error asynchronously saving image on %s: %s\", s.String(), err.Error())\n\t\t\t} else {\n\t\t\t\terrs <- nil\n\t\t\t}\n\t\t}(store)\n\t}\n\n\tfor i := 0; i < len(this); i++ {\n\t\tselect {\n\t\tcase err := <-errs:\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn obj, nil\n}\n\nfunc (this MultiImageStore) Exists(obj *StoreObject) (bool, error) {\n\terrs := make(chan error, len(this))\n\tresults := make(chan bool, len(this))\n\n\tfor _, store := range this {\n\t\tgo func(s ImageStore) {\n\t\t\tr, err := s.Exists(obj)\n\t\t\tif err != nil {\n\t\t\t\terrs <- fmt.Errorf(\"Error asynchronously proving existance for image on %s: %s\", s.String(), err.Error())\n\t\t\t} else {\n\t\t\t\tresults <- r\n\t\t\t}\n\t\t}(store)\n\t}\n\n\tfor i := 0; i < len(this); i++ {\n\t\tselect {\n\t\tcase err := <-errs:\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\tcase r := <-results:\n\t\t\tif r == true {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false, nil\n}\n\nfunc (this MultiImageStore) Get(obj *StoreObject) (io.ReadCloser, error) {\n\terrs := make(chan error, len(this))\n\tresults := make(chan io.ReadCloser, 1)\n\tdone := make(chan bool, 1)\n\n\tfor _, store := range this {\n\t\tgo func(s ImageStore) {\n\t\t\tr, err := s.Get(obj)\n\t\t\tif err != nil {\n\t\t\t\terrs <- fmt.Errorf(\"Error asynchronously getting image on %s: %s\", s.String(), 
err.Error())\n\t\t\t} else {\n\t\t\t\tselect {\n\t\t\t\tcase done <- true:\n\t\t\t\t\tresults <- r\n\t\t\t\tdefault:\n\t\t\t\t\tr.Close()\n\t\t\t\t}\n\t\t\t}\n\t\t}(store)\n\t}\n\n\tvar err error\n\n\tfor i := 0; i < len(this); i++ {\n\t\tselect {\n\t\tcase r := <-results:\n\t\t\treturn r, nil\n\t\tcase err = <-errs:\n\t\t}\n\t}\n\n\treturn nil, err\n}\n\nfunc (this MultiImageStore) String() string {\n\tstr := \"\"\n\n\tfor _, store := range this {\n\t\tstr += store.String()\n\t\tstr += \" \"\n\t}\n\n\treturn str\n}\n"
  },
  {
    "path": "imagestore/storeobject.go",
    "content": "package imagestore\n\ntype StorableObject interface {\n\tGetPath() string\n}\n\ntype StoreObject struct {\n\tId       string // Unique identifier\n\tMimeType string // i.e. image/jpg\n\tSize     string // i.e. thumb\n\tUrl      string // if publicly available\n}\n\nfunc (this *StoreObject) Store(s StorableObject, store ImageStore) error {\n\tpath := s.GetPath()\n\n\tobj, err := store.Save(path, this)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tthis.Url = obj.Url\n\n\treturn nil\n}\n"
  },
  {
    "path": "main.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\n\tmandibleConf \"github.com/Imgur/mandible/config\"\n\tprocessors \"github.com/Imgur/mandible/imageprocessor\"\n\tmandible \"github.com/Imgur/mandible/server\"\n)\n\nfunc main() {\n\tconfigFile := os.Getenv(\"MANDIBLE_CONF\")\n\n\tconfig := mandibleConf.NewConfiguration(configFile)\n\n\tvar server *mandible.Server\n\tvar stats mandible.RuntimeStats\n\n\tif config.DatadogEnabled {\n\t\tvar err error\n\t\tstats, err = mandible.NewDatadogStats(config.DatadogHostname)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Invalid Datadog Hostname: %s\", config.DatadogHostname)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tlog.Println(\"Stats init success\")\n\t} else {\n\t\tstats = &mandible.DiscardStats{}\n\t}\n\n\tif os.Getenv(\"AUTHENTICATION_HMAC_KEY\") != \"\" {\n\t\tkey := []byte(os.Getenv(\"AUTHENTICATION_HMAC_KEY\"))\n\t\tauth := mandible.NewHMACAuthenticatorSHA256(key)\n\t\tserver = mandible.NewAuthenticatedServer(config, processors.EverythingStrategy, auth, stats)\n\t} else {\n\t\tserver = mandible.NewServer(config, processors.EverythingStrategy, stats)\n\t}\n\n\tmuxer := http.NewServeMux()\n\tserver.Configure(muxer)\n\n\tport := fmt.Sprintf(\":%d\", server.Config.Port)\n\n\tlog.Printf(\"Listening on Port: %s\", port)\n\n\tstats.LogStartup()\n\thttp.ListenAndServe(port, muxer)\n}\n"
  },
  {
    "path": "server/authenticator.go",
    "content": "package server\n\nimport (\n\t\"crypto/hmac\"\n\t\"crypto/sha256\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"hash\"\n\t\"net/http\"\n\t\"time\"\n)\n\nvar (\n\tErrNoAuthentication = errors.New(\"No authentication scheme was configured.\")\n\tErrEmptyAuth        = errors.New(\"Empty or missing authentication header.\")\n\tErrNoGrantTime      = errors.New(\"No grant time specified in the authentication grant.\")\n\tErrExpiredGrant     = errors.New(\"The authentication grant has expired.\")\n\tErrMACMismatch      = errors.New(\"The provided message authentication code is invalid for the given message.\")\n)\n\ntype AuthenticatedUser struct {\n\tUserID               string    `json:\"user_id\"`\n\tGrantTime            time.Time `json:\"grant_time\"`\n\tGrantDurationSeconds int64     `json:\"grant_duration_sec\"`\n}\n\ntype Authenticator interface {\n\tGetUser(*http.Request) (*AuthenticatedUser, error)\n}\n\ntype PassthroughAuthenticator struct{}\n\nfunc (auth *PassthroughAuthenticator) GetUser(req *http.Request) (*AuthenticatedUser, error) {\n\treturn nil, ErrNoAuthentication\n}\n\ntype HMACAuthenticator struct {\n\tkey []byte\n\th   func() hash.Hash\n\tnow time.Time\n}\n\nfunc (auth *HMACAuthenticator) SetTime(t time.Time) {\n\tauth.now = t\n}\n\nfunc NewHMACAuthenticatorSHA256(key []byte) *HMACAuthenticator {\n\treturn &HMACAuthenticator{\n\t\tkey: key,\n\t\th:   sha256.New,\n\t}\n}\n\nfunc (auth *HMACAuthenticator) GetUser(req *http.Request) (*AuthenticatedUser, error) {\n\tauthHeader := []byte(req.Header.Get(\"Authorization\"))\n\tuserProvidedHmacBase64 := req.Header.Get(\"X-Authorization-HMAC\")\n\n\tif len(authHeader) == 0 || userProvidedHmacBase64 == \"\" {\n\t\treturn nil, ErrEmptyAuth\n\t}\n\n\tuserProvidedHmac, _ := base64.StdEncoding.DecodeString(userProvidedHmacBase64)\n\n\tmacWriter := hmac.New(auth.h, auth.key)\n\tmacWriter.Write(authHeader)\n\texpectedMac := macWriter.Sum(nil)\n\n\tif hmac.Equal(expectedMac, 
userProvidedHmac) {\n\t\tvar authUser AuthenticatedUser\n\t\terr := json.Unmarshal(authHeader, &authUser)\n\t\t// Valid JSON but no shared values will unmarshal to the zero valued authenticated user; only pass back\n\t\t// a non-zero-valued authenticated user\n\t\tif err == nil && authUser.UserID != \"\" {\n\t\t\tif authUser.GrantTime.IsZero() {\n\t\t\t\treturn nil, ErrNoGrantTime\n\t\t\t} else if authUser.GrantTime.Add(time.Duration(authUser.GrantDurationSeconds) * time.Second).Before(auth.now) {\n\t\t\t\treturn nil, ErrExpiredGrant\n\t\t\t} else {\n\t\t\t\treturn &authUser, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil, ErrMACMismatch\n}\n"
  },
  {
    "path": "server/authenticator_test.go",
    "content": "package server\n\nimport (\n\t\"crypto/hmac\"\n\t\"crypto/sha256\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"net/http\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestPassthroughAuthenticatorAlwaysReturnsNilUser(t *testing.T) {\n\treq, _ := http.NewRequest(\"POST\", \"http://127.0.0.1/user/123/url\", nil)\n\n\tauthenticator := &PassthroughAuthenticator{}\n\tuser, err := authenticator.GetUser(req)\n\tif user != nil {\n\t\tt.Fatalf(\"Expected authenticator of the passthrough authenticator to be nil, instead %+v\", user)\n\t}\n\tif err != ErrNoAuthentication {\n\t\tt.Fatalf(\"Unexpected error: %s\", err.Error())\n\t}\n}\n\nfunc TestHMACAuthenticatorOnValidRequest(t *testing.T) {\n\tmessage := AuthenticatedUser{\n\t\tUserID:               \"123\",\n\t\tGrantTime:            time.Now(),\n\t\tGrantDurationSeconds: 365 * 24 * 3600,\n\t}\n\tmessageBytes, _ := json.Marshal(&message)\n\tmessageMacWriter := hmac.New(sha256.New, []byte(\"foobar\"))\n\tmessageMacWriter.Write(messageBytes)\n\tmessageMac := base64.StdEncoding.EncodeToString(messageMacWriter.Sum(nil))\n\n\treq, _ := http.NewRequest(\"POST\", \"http://127.0.0.1/user/123/url\", nil)\n\n\treq.Header.Set(\"Authorization\", string(messageBytes))\n\treq.Header.Set(\"X-Authorization-HMAC\", string(messageMac))\n\n\tauthenticator := NewHMACAuthenticatorSHA256([]byte(\"foobar\"))\n\tauthenticator.SetTime(time.Now())\n\tuser, err := authenticator.GetUser(req)\n\tif user == nil {\n\t\tt.Fatalf(\"Expected authenticator of of a valid response to not return nil\")\n\t}\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %s\", err.Error())\n\t}\n}\n\nfunc TestHMACAuthenticatorOnEmptyHeader(t *testing.T) {\n\treq, _ := http.NewRequest(\"POST\", \"http://127.0.0.1/user/123/url\", nil)\n\n\treq.Header.Set(\"Authorization\", \"\")\n\n\tauthenticator := NewHMACAuthenticatorSHA256([]byte(\"foobar\"))\n\tuser, err := authenticator.GetUser(req)\n\tif user != nil {\n\t\tt.Fatalf(\"Expected authenticator with no auth 
response to return nil\")\n\t}\n\tif err != ErrEmptyAuth {\n\t\tt.Fatalf(\"Unexpected error: %s\", err.Error())\n\t}\n}\n\nfunc TestHMACAuthenticatorOnInvalidRequest(t *testing.T) {\n\tmessage := AuthenticatedUser{\n\t\tUserID:               \"123\",\n\t\tGrantTime:            time.Now(),\n\t\tGrantDurationSeconds: 365 * 24 * 3600,\n\t}\n\tmessageBytes, _ := json.Marshal(&message)\n\t// wrong key!\n\tmessageMacWriter := hmac.New(sha256.New, []byte(\"jklfdsjklfsdjklfdsjklfsdjklfsd\"))\n\tmessageMacWriter.Write(messageBytes)\n\tmessageMac := base64.StdEncoding.EncodeToString(messageMacWriter.Sum(nil))\n\n\treq, _ := http.NewRequest(\"POST\", \"http://127.0.0.1/user/123/url\", nil)\n\n\treq.Header.Set(\"Authorization\", string(messageBytes))\n\treq.Header.Set(\"X-Authorization-HMAC\", string(messageMac))\n\n\tauthenticator := NewHMACAuthenticatorSHA256([]byte(\"foobar\"))\n\tauthenticator.SetTime(time.Now())\n\tuser, err := authenticator.GetUser(req)\n\tif user != nil {\n\t\tt.Fatalf(\"Expected authenticator of an invalid response to return nil\")\n\t}\n\tif err != ErrMACMismatch {\n\t\tt.Fatalf(\"Unexpected error: %s\", err.Error())\n\t}\n}\n\nfunc TestHMACAuthenticatorOnExpiredGrant(t *testing.T) {\n\tgrantedTime := time.Now()\n\trequestTime := time.Now().Add(time.Hour)\n\tmessage := AuthenticatedUser{\n\t\tUserID:               \"123\",\n\t\tGrantTime:            grantedTime,\n\t\tGrantDurationSeconds: 5,\n\t}\n\tmessageBytes, _ := json.Marshal(&message)\n\tmessageMacWriter := hmac.New(sha256.New, []byte(\"foobar\"))\n\tmessageMacWriter.Write(messageBytes)\n\tmessageMac := base64.StdEncoding.EncodeToString(messageMacWriter.Sum(nil))\n\n\treq, _ := http.NewRequest(\"POST\", \"http://127.0.0.1/user/123/url\", nil)\n\n\treq.Header.Set(\"Authorization\", string(messageBytes))\n\treq.Header.Set(\"X-Authorization-HMAC\", string(messageMac))\n\n\tauthenticator := NewHMACAuthenticatorSHA256([]byte(\"foobar\"))\n\tauthenticator.SetTime(requestTime)\n\n\tuser, err := 
authenticator.GetUser(req)\n\tif user != nil {\n\t\tt.Fatalf(\"Expected authenticator of an invalid response to return nil\")\n\t}\n\tif err != ErrExpiredGrant {\n\t\tt.Fatalf(\"Unexpected error: %s\", err.Error())\n\t}\n}\n"
  },
  {
    "path": "server/server.go",
    "content": "package server\n\nimport (\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/gorilla/mux\"\n\n\t\"github.com/Imgur/mandible/config\"\n\t\"github.com/Imgur/mandible/imageprocessor\"\n\t\"github.com/Imgur/mandible/imagestore\"\n\t\"github.com/Imgur/mandible/uploadedfile\"\n)\n\ntype Server struct {\n\tConfig            *config.Configuration\n\tHTTPClient        *http.Client\n\tImageStore        imagestore.ImageStore\n\thashGenerator     *imagestore.HashGenerator\n\tprocessorStrategy imageprocessor.ImageProcessorStrategy\n\tauthenticator     Authenticator\n\tstats             RuntimeStats\n}\n\ntype ServerResponse struct {\n\tError   string      `json:\"error,omitempty\"`\n\tData    interface{} `json:\"data,omitempty\"`\n\tStatus  int         `json:\"status\"`\n\tSuccess *bool       `json:\"success\"` // the empty value is the nil pointer, because this is a computed property\n}\n\nfunc (resp *ServerResponse) Write(w http.ResponseWriter, s RuntimeStats) {\n\trespBytes, _ := resp.json()\n\n\tif resp.Status >= http.StatusBadRequest {\n\t\tlog.Println(fmt.Sprintf(\"HTTP error: %d -- %s\", resp.Status, resp.Error))\n\t\ts.Error(resp.Status)\n\t}\n\n\tw.WriteHeader(resp.Status)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(respBytes)\n}\n\n// The success property is a computed property on the response status\n// This can't implement the MarshalJSON() interface sadly because it would be recursive\nfunc (resp *ServerResponse) json() ([]byte, error) {\n\tvar success bool\n\tsuccess = (resp.Status == http.StatusOK)\n\tresp.Success = &success\n\tbytes, err := json.Marshal(resp)\n\tresp.Success = nil\n\treturn bytes, err\n}\n\ntype ImageResponse struct {\n\tLink    string                 `json:\"link\"`\n\tMime    string                 `json:\"mime\"`\n\tName    string                 
`json:\"name\"`\n\tHash    string                 `json:\"hash\"`\n\tSize    int64                  `json:\"size\"`\n\tWidth   int                    `json:\"width\"`\n\tHeight  int                    `json:\"height\"`\n\tOCRText string                 `json:\"ocrtext\"`\n\tThumbs  map[string]interface{} `json:\"thumbs\"`\n\tUserID  string                 `json:\"user_id\"`\n}\n\ntype OcrResponse struct {\n\tHash    string `json:\"hash\"`\n\tOCRText string `json:\"ocrtext\"`\n}\n\ntype UserError struct {\n\tUserFacingMessage error\n\tLogMessage        error\n}\n\nfunc NewServer(c *config.Configuration, strategy imageprocessor.ImageProcessorStrategy, stats RuntimeStats) *Server {\n\tfactory := imagestore.NewFactory(c)\n\thttpclient := &http.Client{}\n\tstores := factory.NewImageStores()\n\n\thashGenerator := factory.NewHashGenerator(stores)\n\tauthenticator := &PassthroughAuthenticator{}\n\treturn &Server{c, httpclient, stores, hashGenerator, strategy, authenticator, stats}\n}\n\nfunc NewAuthenticatedServer(c *config.Configuration, strategy imageprocessor.ImageProcessorStrategy, auth Authenticator, stats RuntimeStats) *Server {\n\tfactory := imagestore.NewFactory(c)\n\thttpclient := &http.Client{}\n\tstores := factory.NewImageStores()\n\n\thashGenerator := factory.NewHashGenerator(stores)\n\treturn &Server{c, httpclient, stores, hashGenerator, strategy, auth, stats}\n}\n\nfunc (s *Server) uploadFile(uploadFile io.Reader, fileName string, thumbs []*uploadedfile.ThumbFile, user *AuthenticatedUser) ServerResponse {\n\ttmpFile, err := saveToTmp(uploadFile)\n\tif err != nil {\n\t\treturn ServerResponse{\n\t\t\tError:  \"Error saving to disk!\",\n\t\t\tStatus: http.StatusInternalServerError,\n\t\t}\n\t}\n\n\tupload, err := uploadedfile.NewUploadedFile(fileName, tmpFile, thumbs)\n\tdefer upload.Clean()\n\n\tif err != nil {\n\t\treturn ServerResponse{\n\t\t\tError:  \"Error detecting mime type!\",\n\t\t\tStatus: http.StatusInternalServerError,\n\t\t}\n\t}\n\n\tprocessor, 
err := s.processorStrategy(s.Config, upload)\n\tif err != nil {\n\t\tlog.Printf(\"Error creating processor factory: %s\", err.Error())\n\t\treturn ServerResponse{\n\t\t\tError:  \"Unable to process image!\",\n\t\t\tStatus: http.StatusInternalServerError,\n\t\t}\n\t}\n\n\terr = processor.Run(upload)\n\tif err != nil {\n\t\tlog.Printf(\"Error processing %+v: %s\", upload, err.Error())\n\t\treturn ServerResponse{\n\t\t\tError:  \"Unable to process image!\",\n\t\t\tStatus: http.StatusInternalServerError,\n\t\t}\n\t}\n\n\tupload.SetHash(s.hashGenerator.Get())\n\n\tfactory := imagestore.NewFactory(s.Config)\n\tobj := factory.NewStoreObject(upload.GetHash(), upload.GetMime(), \"original\")\n\n\tuploadFilepath := upload.GetPath()\n\tobj, err = s.ImageStore.Save(uploadFilepath, obj)\n\tif err != nil {\n\t\tlog.Printf(\"Error saving processed output to store: %s\", err.Error())\n\t\treturn ServerResponse{\n\t\t\tError:  \"Unable to save image!\",\n\t\t\tStatus: http.StatusInternalServerError,\n\t\t}\n\t}\n\n\tthumbsResp, err := s.buildThumbResponse(upload)\n\tif err != nil {\n\t\tlog.Printf(\"Error processing %+v: %s\", upload, err.Error())\n\t\treturn ServerResponse{\n\t\t\tError:  \"Unable to process thumbnail!\",\n\t\t\tStatus: http.StatusInternalServerError,\n\t\t}\n\t}\n\n\tsize, err := upload.FileSize()\n\tif err != nil {\n\t\treturn ServerResponse{\n\t\t\tError:  \"Unable to fetch image metadata!\",\n\t\t\tStatus: http.StatusInternalServerError,\n\t\t}\n\t}\n\n\twidth, height, err := upload.Dimensions()\n\n\tif err != nil {\n\t\treturn ServerResponse{\n\t\t\tError:  \"Error fetching upload dimensions: \" + err.Error(),\n\t\t\tStatus: http.StatusInternalServerError,\n\t\t}\n\t}\n\n\tvar userID string\n\tif user != nil {\n\t\tuserID = string(user.UserID)\n\t}\n\n\tresp := ImageResponse{\n\t\tLink:    obj.Url,\n\t\tMime:    obj.MimeType,\n\t\tHash:    upload.GetHash(),\n\t\tName:    fileName,\n\t\tSize:    size,\n\t\tWidth:   width,\n\t\tHeight:  height,\n\t\tOCRText: 
upload.GetOCRText(),\n\t\tThumbs:  thumbsResp,\n\t\tUserID:  userID,\n\t}\n\n\treturn ServerResponse{\n\t\tData:   resp,\n\t\tStatus: http.StatusOK,\n\t}\n}\n\ntype fileExtractor func(r *http.Request) (uploadFile io.Reader, filename string, uerr *UserError)\n\nfunc (s *Server) Configure(muxer *http.ServeMux) {\n\n\tvar extractorFile fileExtractor = func(r *http.Request) (uploadFile io.Reader, filename string, uerr *UserError) {\n\t\tuploadFile, header, err := r.FormFile(\"image\")\n\t\tif err != nil {\n\t\t\treturn nil, \"\", &UserError{LogMessage: err, UserFacingMessage: errors.New(\"Error processing file\")}\n\t\t}\n\n\t\ts.stats.Upload(\"file\")\n\t\treturn uploadFile, header.Filename, nil\n\t}\n\n\tvar extractorUrl fileExtractor = func(r *http.Request) (uploadFile io.Reader, filename string, uerr *UserError) {\n\t\turl := r.FormValue(\"image\")\n\t\tuploadFile, err := s.download(url)\n\n\t\tif err != nil {\n\t\t\treturn nil, \"\", &UserError{LogMessage: err, UserFacingMessage: errors.New(\"Error downloading URL!\")}\n\t\t}\n\n\t\ts.stats.Upload(\"url\")\n\t\treturn uploadFile, path.Base(url), nil\n\t}\n\n\tvar extractorBase64 fileExtractor = func(r *http.Request) (uploadFile io.Reader, filename string, uerr *UserError) {\n\t\tinput := r.FormValue(\"image\")\n\t\tb64data := input[strings.IndexByte(input, ',')+1:]\n\n\t\tuploadFile = base64.NewDecoder(base64.StdEncoding, strings.NewReader(b64data))\n\n\t\ts.stats.Upload(\"base64\")\n\t\treturn uploadFile, \"\", nil\n\t}\n\n\ttype uploadEndpoint func(fileExtractor, *AuthenticatedUser) http.HandlerFunc\n\n\tvar uploadHandler uploadEndpoint = func(extractor fileExtractor, user *AuthenticatedUser) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\tuploadFile, filename, uerr := extractor(r)\n\t\t\tif uerr != nil {\n\t\t\t\tlog.Printf(\"Error extracting files: %s\", uerr.LogMessage.Error())\n\t\t\t\tresp := ServerResponse{\n\t\t\t\t\tStatus: http.StatusBadRequest,\n\t\t\t\t\tError:  
uerr.UserFacingMessage.Error(),\n\t\t\t\t}\n\t\t\t\tresp.Write(w, s.stats)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tthumbs, err := parseThumbs(r)\n\t\t\tif err != nil {\n\t\t\t\tresp := ServerResponse{\n\t\t\t\t\tStatus: http.StatusBadRequest,\n\t\t\t\t\tError:  \"Error parsing thumbnails!\",\n\t\t\t\t}\n\t\t\t\tresp.Write(w, s.stats)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tresp := s.uploadFile(uploadFile, filename, thumbs, user)\n\n\t\t\tswitch uploadFile.(type) {\n\t\t\tcase io.ReadCloser:\n\t\t\t\tdefer uploadFile.(io.ReadCloser).Close()\n\t\t\t\tbreak\n\t\t\tdefault:\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tresp.Write(w, s.stats)\n\t\t}\n\t}\n\n\t// Wrap an existing upload endpoint with authentication, returning a new endpoint that 4xxs unless authentication is passed.\n\tauthenticatedEndpoint := func(endpoint uploadEndpoint, extractor fileExtractor) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\trequestVars := mux.Vars(r)\n\t\t\tattemptedUserIdString, ok := requestVars[\"user_id\"]\n\n\t\t\t// They didn't send a user ID to a /user endpoint\n\t\t\tif !ok || attemptedUserIdString == \"\" {\n\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tuser, err := s.authenticator.GetUser(r)\n\n\t\t\t// Their HMAC was invalid or they are trying to upload to someone else's account\n\t\t\tif user == nil || err != nil || user.UserID != attemptedUserIdString {\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t\tlog.Printf(\"Authentication error: %s\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\thandler := endpoint(extractor, user)\n\t\t\thandler(w, r)\n\t\t}\n\t}\n\n\tocrHandler := func(w http.ResponseWriter, r *http.Request) {\n\t\timageID := r.FormValue(\"uid\")\n\t\tif imageID == \"\" {\n\t\t\tresp := ServerResponse{\n\t\t\t\tStatus: http.StatusBadRequest,\n\t\t\t\tError:  \"Image ID must be passed as \\\"uid\\\"\",\n\t\t\t}\n\t\t\tresp.Write(w, s.stats)\n\t\t\treturn\n\t\t}\n\n\t\tfactory := 
imagestore.NewFactory(s.Config)\n\t\ttObj := factory.NewStoreObject(imageID, \"\", \"original\")\n\n\t\tstoreReader, err := s.ImageStore.Get(tObj)\n\t\tif err != nil {\n\t\t\tresp := ServerResponse{\n\t\t\t\tStatus: http.StatusBadRequest,\n\t\t\t\tError:  fmt.Sprintf(\"Error retrieving image with ID: %s\", imageID),\n\t\t\t}\n\t\t\tresp.Write(w, s.stats)\n\t\t\treturn\n\t\t}\n\t\tdefer storeReader.Close()\n\n\t\tstoreFile, err := saveToTmp(storeReader)\n\t\tif err != nil {\n\t\t\tresp := ServerResponse{\n\t\t\t\tStatus: http.StatusBadRequest,\n\t\t\t\tError:  fmt.Sprintf(\"Error saving original image to tmpfile: %s\", imageID),\n\t\t\t}\n\t\t\tresp.Write(w, s.stats)\n\t\t\treturn\n\t\t}\n\t\tdefer os.Remove(storeFile)\n\n\t\tupload, err := uploadedfile.NewUploadedFile(\"\", storeFile, nil)\n\t\tif err != nil {\n\t\t\tresp := ServerResponse{\n\t\t\t\tError:  fmt.Sprintf(\"Unable to generate UploadedFile object: %s\", imageID),\n\t\t\t\tStatus: http.StatusInternalServerError,\n\t\t\t}\n\t\t\tresp.Write(w, s.stats)\n\t\t\treturn\n\t\t}\n\t\tupload.SetHash(imageID)\n\t\tdefer upload.Clean()\n\n\t\t//TODO: fix this sp error:\n\t\tprocessor := imageprocessor.DuelOCRStratagy()\n\t\terr = processor.Process(upload)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error runinng DuelOCRStrategy on %+v: %s\", upload, err.Error())\n\t\t\tresp := ServerResponse{\n\t\t\t\tError:  \"Unable to execute OCR strategy\",\n\t\t\t\tStatus: http.StatusInternalServerError,\n\t\t\t}\n\t\t\tresp.Write(w, s.stats)\n\t\t\treturn\n\t\t}\n\n\t\tocrResp := OcrResponse{\n\t\t\tHash:    upload.GetHash(),\n\t\t\tOCRText: upload.GetOCRText(),\n\t\t}\n\n\t\tresp := ServerResponse{\n\t\t\tData:   ocrResp,\n\t\t\tStatus: http.StatusOK,\n\t\t}\n\n\t\tresp.Write(w, s.stats)\n\t}\n\n\tthumbnailHandler := func(w http.ResponseWriter, r *http.Request) {\n\t\timageID := r.FormValue(\"uid\")\n\n\t\tfactory := imagestore.NewFactory(s.Config)\n\t\ttObj := factory.NewStoreObject(imageID, \"\", \"original\")\n\n\t\tthumbs, 
err := parseThumbs(r)\n\t\tif err != nil {\n\t\t\tresp := ServerResponse{\n\t\t\t\tStatus: http.StatusBadRequest,\n\t\t\t\tError:  \"Error parsing thumbnails!\",\n\t\t\t}\n\t\t\tresp.Write(w, s.stats)\n\t\t\treturn\n\t\t}\n\n\t\tif len(thumbs) != 1 {\n\t\t\tresp := ServerResponse{\n\t\t\t\tStatus: http.StatusBadRequest,\n\t\t\t\tError:  \"Wrong number of thumbnails, expected 1\",\n\t\t\t}\n\t\t\tresp.Write(w, s.stats)\n\t\t\treturn\n\t\t}\n\n\t\tstoreReader, err := s.ImageStore.Get(tObj)\n\t\tif err != nil {\n\t\t\tresp := ServerResponse{\n\t\t\t\tStatus: http.StatusNotFound,\n\t\t\t\tError:  fmt.Sprintf(\"Error retrieving image with ID: %s\", imageID),\n\t\t\t}\n\t\t\tresp.Write(w, s.stats)\n\t\t\treturn\n\t\t}\n\t\tdefer storeReader.Close()\n\n\t\tstoreFile, err := saveToTmp(storeReader)\n\t\tif err != nil {\n\t\t\tresp := ServerResponse{\n\t\t\t\tStatus: http.StatusInternalServerError,\n\t\t\t\tError:  \"Error saving original Image!\",\n\t\t\t}\n\t\t\tresp.Write(w, s.stats)\n\t\t\treturn\n\t\t}\n\t\tdefer os.Remove(storeFile)\n\n\t\tupload, err := uploadedfile.NewUploadedFile(\"\", storeFile, thumbs)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error processing %+v: %s\", storeFile, err.Error())\n\t\t\tresp := ServerResponse{\n\t\t\t\tError:  \"Unable to process thumbnail!\",\n\t\t\t\tStatus: http.StatusInternalServerError,\n\t\t\t}\n\t\t\tresp.Write(w, s.stats)\n\t\t\treturn\n\t\t}\n\t\tupload.SetHash(imageID)\n\t\tdefer upload.Clean()\n\n\t\tprocessor, _ := imageprocessor.ThumbnailStrategy(s.Config, upload)\n\t\terr = processor.Run(upload)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error processing %+v: %s\", upload, err.Error())\n\t\t\tresp := ServerResponse{\n\t\t\t\tError:  \"Unable to process thumbnail!\",\n\t\t\t\tStatus: http.StatusInternalServerError,\n\t\t\t}\n\t\t\tresp.Write(w, s.stats)\n\t\t\treturn\n\t\t}\n\n\t\tts := upload.GetThumbs()\n\t\tt := ts[0]\n\n\t\tif !t.GetNoStore() {\n\t\t\tthumbName := fmt.Sprintf(\"%s/%s\", upload.GetHash(), 
t.Name)\n\t\t\ttObj = factory.NewStoreObject(thumbName, upload.GetMime(), \"thumbnail\")\n\t\t\terr = tObj.Store(t, s.ImageStore)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error storing %+v: %s\", t, err.Error())\n\t\t\t\tresp := ServerResponse{\n\t\t\t\t\tError:  \"Unable to store thumbnail!\",\n\t\t\t\t\tStatus: http.StatusInternalServerError,\n\t\t\t\t}\n\t\t\t\tresp.Write(w, s.stats)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\ts.stats.Thumbnail(t.Name)\n\n\t\thttp.ServeFile(w, r, t.GetPath())\n\t}\n\n\trootHandler := func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"<html><head><title>An open source image uploader by Imgur</title></head><body style=\\\"background-color: #2b2b2b; color: white\\\">\")\n\t\tfmt.Fprint(w, \"Congratulations! Your image upload server is up and running. Head over to the <a style=\\\"color: #85bf25 \\\" href=\\\"https://github.com/Imgur/mandible\\\">github</a> page for documentation\")\n\t\tfmt.Fprint(w, \"<br/><br/><br/><img src=\\\"http://i.imgur.com/YbfUjs5.png?2\\\" />\")\n\t\tfmt.Fprint(w, \"</body></html>\")\n\t}\n\n\trequestMiddleware := func(handler http.HandlerFunc) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\ts.stats.Request(r.URL.Path)\n\n\t\t\tif os.Getenv(\"MANDIBLE_DEBUG\") == \"true\" {\n\t\t\t\tr.ParseForm()\n\t\t\t\tlog.Printf(\"Request url: %s with get params: %v and Headers: %v\", r.URL.Path, r.Form, r.Header)\n\t\t\t}\n\n\t\t\tstart := time.Now()\n\t\t\thandler(w, r)\n\t\t\telapsed := time.Since(start)\n\n\t\t\ts.stats.ResponseTime(elapsed, r.URL.Path)\n\t\t}\n\t}\n\n\trouter := mux.NewRouter()\n\n\trouter.HandleFunc(\"/file\", requestMiddleware(uploadHandler(extractorFile, nil)))\n\trouter.HandleFunc(\"/url\", requestMiddleware(uploadHandler(extractorUrl, nil)))\n\trouter.HandleFunc(\"/base64\", requestMiddleware(uploadHandler(extractorBase64, nil)))\n\n\trouter.HandleFunc(\"/user/{user_id}/file\", requestMiddleware(authenticatedEndpoint(uploadHandler, 
extractorBase64)))\n\trouter.HandleFunc(\"/user/{user_id}/url\", requestMiddleware(authenticatedEndpoint(uploadHandler, extractorUrl)))\n\trouter.HandleFunc(\"/user/{user_id}/base64\", requestMiddleware(authenticatedEndpoint(uploadHandler, extractorBase64)))\n\n\trouter.HandleFunc(\"/thumbnail\", requestMiddleware(thumbnailHandler))\n\n\trouter.HandleFunc(\"/ocr\", requestMiddleware(ocrHandler))\n\trouter.HandleFunc(\"/\", requestMiddleware(rootHandler))\n\n\tmuxer.Handle(\"/\", router)\n}\n\nfunc (s *Server) buildThumbResponse(upload *uploadedfile.UploadedFile) (map[string]interface{}, error) {\n\tfactory := imagestore.NewFactory(s.Config)\n\tthumbsResp := map[string]interface{}{}\n\n\tfor _, t := range upload.GetThumbs() {\n\t\tthumbName := fmt.Sprintf(\"%s/%s\", upload.GetHash(), t.Name)\n\t\ttObj := factory.NewStoreObject(thumbName, upload.GetMime(), \"thumbnail\")\n\t\terr := tObj.Store(t, s.ImageStore)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ts.stats.Thumbnail(t.Name)\n\t\tthumbsResp[t.Name] = tObj.Url\n\t}\n\n\treturn thumbsResp, nil\n}\n\nfunc (s *Server) download(url string) (io.ReadCloser, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"User-Agent\", s.Config.UserAgent)\n\n\tresp, err := s.HTTPClient.Do(req)\n\n\tif err != nil {\n\t\t// \"HTTP protocol error\" - maybe the server sent an invalid response or timed out\n\t\treturn nil, err\n\t}\n\n\tif 200 != resp.StatusCode {\n\t\treturn nil, errors.New(\"Non-200 status code received\")\n\t}\n\n\tcontentLength := resp.ContentLength\n\n\tif contentLength == 0 {\n\t\treturn nil, errors.New(\"Empty file received\")\n\t}\n\n\treturn resp.Body, nil\n}\n\nfunc parseThumbs(r *http.Request) ([]*uploadedfile.ThumbFile, error) {\n\tthumbString := r.FormValue(\"thumbs\")\n\tif thumbString == \"\" {\n\t\treturn []*uploadedfile.ThumbFile{}, nil\n\t}\n\n\ttype ThumbRequest struct {\n\t\tWidth         int    
`json:\"width\"`\n\t\tMaxWidth      int    `json:\"max_width\"`\n\t\tHeight        int    `json:\"height\"`\n\t\tMaxHeight     int    `json:\"max_height\"`\n\t\tShape         string `json:\"shape\"`\n\t\tCropGravity   string `json:\"crop_gravity\"`\n\t\tCropHeight    int    `json:\"crop_height\"`\n\t\tCropWidth     int    `json:\"crop_width\"`\n\t\tQuality       int    `json:\"quality\"`\n\t\tCropRatio     string `json:\"crop_ratio\"`\n\t\tDesiredFormat string `json:\"format\"`\n\t\tNoStore       bool   `json:\"nostore\"`\n\t}\n\tvar thumbRequests map[string]ThumbRequest\n\terr := json.Unmarshal([]byte(thumbString), &thumbRequests)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn nil, errors.New(\"Error parsing thumbnail JSON!\")\n\t}\n\n\tvar thumbs []*uploadedfile.ThumbFile\n\tfor name, thumbRequest := range thumbRequests {\n\t\tthumb := uploadedfile.NewThumbFile(\n\t\t\tthumbRequest.Width,\n\t\t\tthumbRequest.MaxWidth,\n\t\t\tthumbRequest.Height,\n\t\t\tthumbRequest.MaxHeight,\n\t\t\tname,\n\t\t\tthumbRequest.Shape,\n\t\t\t\"\", // shape\n\t\t\tthumbRequest.CropGravity,\n\t\t\tthumbRequest.CropWidth,\n\t\t\tthumbRequest.CropHeight,\n\t\t\tthumbRequest.CropRatio,\n\t\t\tthumbRequest.Quality,\n\t\t\tthumbRequest.DesiredFormat,\n\t\t\tthumbRequest.NoStore,\n\t\t)\n\n\t\tthumbs = append(thumbs, thumb)\n\t}\n\n\treturn thumbs, nil\n}\n\nfunc saveToTmp(upload io.Reader) (string, error) {\n\ttmpFile, err := ioutil.TempFile(os.TempDir(), \"image\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\n\t\treturn \"\", err\n\t}\n\n\tdefer tmpFile.Close()\n\n\t_, err = io.Copy(tmpFile, upload)\n\tif err != nil {\n\t\tfmt.Println(err)\n\n\t\treturn \"\", err\n\t}\n\n\treturn tmpFile.Name(), nil\n}\n"
  },
  {
    "path": "server/server_test.go",
    "content": "package server\n\nimport (\n\t\"bytes\"\n\t\"crypto/hmac\"\n\t\"crypto/sha256\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/Imgur/mandible/config\"\n\t\"github.com/Imgur/mandible/imageprocessor\"\n\t\"github.com/Imgur/mandible/imagestore\"\n)\n\nfunc TestRequestingTheFrontPageGetsSomeHTML(t *testing.T) {\n\tcfg := &config.Configuration{\n\t\tMaxFileSize: 99999999999,\n\t\tHashLength:  7,\n\t\tUserAgent:   \"Foobar\",\n\t\tStores:      make([]map[string]string, 0),\n\t\tPort:        8888,\n\t}\n\n\tmemcfg := make(map[string]string)\n\tmemcfg[\"Type\"] = \"memory\"\n\tcfg.Stores = append(cfg.Stores, memcfg)\n\tstats := &DiscardStats{}\n\tserver := NewServer(cfg, imageprocessor.PassthroughStrategy, stats)\n\n\tmuxer := http.NewServeMux()\n\n\tserver.Configure(muxer)\n\n\tts := httptest.NewServer(muxer)\n\tdefer ts.Close()\n\n\tres, err := http.Get(ts.URL)\n\tif err != nil {\n\t\tt.Fatalf(\"Error when retrieving %s: %s\", ts.URL, err.Error())\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to read response body of %s: %s\", ts.URL, err.Error())\n\t}\n\n\tt.Logf(\"Response to %s/ was: %s\", ts.URL, body)\n\n\tif res.StatusCode != 200 {\n\t\tt.Fatalf(\"Unexpected status code %d\", res.StatusCode)\n\t}\n\n\tsbody := string(body)\n\n\tif !strings.Contains(sbody, \"<html>\") {\n\t\tt.Fatalf(\"Did I get HTML back? 
Didn't find <html>...\")\n\t}\n}\n\nfunc TestPostingBase64FilePutsTheFileInStorageAndReturnsJSON(t *testing.T) {\n\tcfg := &config.Configuration{\n\t\tMaxFileSize: 99999999999,\n\t\tHashLength:  7,\n\t\tUserAgent:   \"Foobar\",\n\t\tStores:      make([]map[string]string, 0),\n\t\tPort:        8888,\n\t}\n\n\tmemcfg := make(map[string]string)\n\tmemcfg[\"Type\"] = \"memory\"\n\tcfg.Stores = append(cfg.Stores, memcfg)\n\tstats := &DiscardStats{}\n\tserver := NewServer(cfg, imageprocessor.PassthroughStrategy, stats)\n\n\tmuxer := http.NewServeMux()\n\n\tserver.Configure(muxer)\n\n\tts := httptest.NewServer(muxer)\n\tdefer ts.Close()\n\n\t// a 1x1 base64 encoded transparent GIF\n\tb64bytes, _ := base64.StdEncoding.DecodeString(b64gif)\n\n\tvalues := make(url.Values)\n\tvalues.Add(\"image\", b64gif)\n\n\tres, err := http.PostForm(ts.URL+\"/base64\", values)\n\tif err != nil {\n\t\tt.Fatalf(\"Error when uploading base64 GIF: %s\", err.Error())\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to read response body: %s\", err.Error())\n\t}\n\n\tt.Logf(\"Response to /base64 was: %s\", body)\n\n\tif res.StatusCode != 200 {\n\t\tt.Fatalf(\"Unexpected status code %d\", res.StatusCode)\n\t}\n\n\tvar serverResp ServerResponse\n\tvar imageResp ImageResponse\n\terr = json.Unmarshal(body, &serverResp)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error parsing response: %s\", err.Error())\n\t}\n\n\tif !*serverResp.Success {\n\t\tt.Fatalf(\"Uploading GIF was unsuccessful\")\n\t}\n\n\timageRespBytes, _ := json.Marshal(serverResp.Data)\n\n\terr = json.Unmarshal(imageRespBytes, &imageResp)\n\n\tif imageResp.Height != 1 {\n\t\tt.Fatalf(\"Expected height to be 1, instead %d\", imageResp.Height)\n\t}\n\n\tif imageResp.Width != 1 {\n\t\tt.Fatalf(\"Expected width to be 1, instead %d\", imageResp.Width)\n\t}\n\n\tif imageResp.Size != 42 {\n\t\tt.Fatalf(\"Expected size to be 42, instead %d\", imageResp.Size)\n\t}\n\n\tif imageResp.Mime != \"image/gif\" 
{\n\t\tt.Fatalf(\"Expected image MIME type to be image/gif, instead %s\", imageResp.Mime)\n\t}\n\n\timmStore := server.ImageStore\n\texists, err := immStore.Exists(&imagestore.StoreObject{Id: imageResp.Hash})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error checking if %s exists in in-memory image store: %s\", imageResp.Hash, err.Error())\n\t}\n\n\tif !exists {\n\t\tt.Fatalf(\"Expected to find %s in the in-memory storage, instead absent. Dump: %+v\", imageResp.Hash, immStore)\n\t}\n\n\tstoredBodyReader, err := immStore.Get(&imagestore.StoreObject{Id: imageResp.Hash})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error fetching %s from in-memory image store: %s\", imageResp.Hash, err.Error())\n\t}\n\tstoredBodyBytes, _ := ioutil.ReadAll(storedBodyReader)\n\n\tif !bytes.Equal(storedBodyBytes, []byte(b64bytes)) {\n\t\tt.Fatalf(\"Stored bytes %s != %s\", storedBodyBytes, []byte(b64bytes))\n\t}\n}\n\nfunc TestAuthentication(t *testing.T) {\n\tcfg := &config.Configuration{\n\t\tMaxFileSize: 99999999999,\n\t\tHashLength:  7,\n\t\tUserAgent:   \"Foobar\",\n\t\tStores:      make([]map[string]string, 0),\n\t\tPort:        8888,\n\t}\n\n\tmemcfg := make(map[string]string)\n\tmemcfg[\"Type\"] = \"memory\"\n\tcfg.Stores = append(cfg.Stores, memcfg)\n\tauthenticator := NewHMACAuthenticatorSHA256([]byte(\"foobar\"))\n\tstats := &DiscardStats{}\n\tserver := NewAuthenticatedServer(cfg, imageprocessor.PassthroughStrategy, authenticator, stats)\n\n\tmuxer := http.NewServeMux()\n\n\tserver.Configure(muxer)\n\n\tts := httptest.NewServer(muxer)\n\tdefer ts.Close()\n\n\tvalues := make(url.Values)\n\tvalues.Add(\"image\", b64gif)\n\n\treq, err := http.NewRequest(\"POST\", ts.URL+\"/user/123/base64\", strings.NewReader(values.Encode()))\n\tif err != nil {\n\t\tt.Fatalf(\"Error when forming authenticated base64 GIF upload request: %s\", err.Error())\n\t}\n\n\tmessage := AuthenticatedUser{\n\t\tUserID:               \"123\",\n\t\tGrantTime:            
time.Now(),\n\t\tGrantDurationSeconds: 365 * 24 * 3600,\n\t}\n\tmessageBytes, _ := json.Marshal(&message)\n\tmessageMacWriter := hmac.New(sha256.New, []byte(\"foobar\"))\n\tmessageMacWriter.Write(messageBytes)\n\tmessageMac := base64.StdEncoding.EncodeToString(messageMacWriter.Sum(nil))\n\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\treq.Header.Set(\"Authorization\", string(messageBytes))\n\treq.Header.Set(\"X-Authorization-HMAC\", string(messageMac))\n\n\thttpclient := http.Client{}\n\tres, err := httpclient.Do(req)\n\tif err != nil {\n\t\tt.Fatalf(\"Error when uploading authenticated base64 GIF: %s\", err.Error())\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to read response body: %s\", err.Error())\n\t}\n\n\tt.Logf(\"Response to /base64 was: %s\", body)\n\n\tif res.StatusCode != 200 {\n\t\tt.Fatalf(\"Unexpected status code %d\", res.StatusCode)\n\t}\n\n\tvar serverResp ServerResponse\n\tvar imageResp ImageResponse\n\terr = json.Unmarshal(body, &serverResp)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error parsing response: %s\", err.Error())\n\t}\n\n\tif !*serverResp.Success {\n\t\tt.Fatalf(\"Uploading GIF was unsuccessful\")\n\t}\n\n\timageRespBytes, _ := json.Marshal(serverResp.Data)\n\n\terr = json.Unmarshal(imageRespBytes, &imageResp)\n\n\tif imageResp.Mime != \"image/gif\" {\n\t\tt.Fatalf(\"Expected image MIME type to be image/gif, instead %s\", imageResp.Mime)\n\t}\n\n\tif imageResp.UserID != \"123\" {\n\t\tt.Fatalf(\"Expected user ID to be \\\"123\\\", instead \\\"%s\\\"\", imageResp.UserID)\n\t}\n}\n\nfunc TestGetFullWebpThumb(t *testing.T) {\n\tcfg := &config.Configuration{\n\t\tMaxFileSize: 99999999999,\n\t\tHashLength:  7,\n\t\tUserAgent:   \"Foobar\",\n\t\tStores:      make([]map[string]string, 0),\n\t\tPort:        8888,\n\t}\n\n\tmemcfg := make(map[string]string)\n\tmemcfg[\"Type\"] = \"memory\"\n\tcfg.Stores = append(cfg.Stores, memcfg)\n\tstats := 
&DiscardStats{}\n\tserver := NewServer(cfg, imageprocessor.ThumbnailStrategy, stats)\n\tmuxer := http.NewServeMux()\n\tserver.Configure(muxer)\n\tts := httptest.NewServer(muxer)\n\tdefer ts.Close()\n\n\tthumbsJson, _ := json.Marshal(map[string]interface{}{\n\t\t\"webp\": map[string]interface{}{\n\t\t\t\"format\": \"webp\",\n\t\t},\n\t})\n\n\tvalues := make(url.Values)\n\tvalues.Add(\"image\", b64dan)\n\tvalues.Add(\"thumbs\", string(thumbsJson))\n\n\tres, err := http.PostForm(ts.URL+\"/base64\", values)\n\tif err != nil {\n\t\tt.Fatalf(\"Error when uploading base64 image: %s\", err.Error())\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to read response body: %s\", err.Error())\n\t}\n\n\tt.Logf(\"Response to /base64 was: %s\", body)\n\n\tif res.StatusCode != 200 {\n\t\tt.Fatalf(\"Unexpected status code %d\", res.StatusCode)\n\t}\n\n\tvar serverResp ServerResponse\n\tvar imageResp ImageResponse\n\terr = json.Unmarshal(body, &serverResp)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error parsing response: %s\", err.Error())\n\t}\n\n\tif !*serverResp.Success {\n\t\tt.Fatalf(\"Uploading image was unsuccessful\")\n\t}\n\n\timageRespBytes, _ := json.Marshal(serverResp.Data)\n\n\terr = json.Unmarshal(imageRespBytes, &imageResp)\n\n\tif len(imageResp.Thumbs) == 0 {\n\t\tt.Fatalf(\"Expected thumbs to contain data, instead blank\")\n\t}\n\n\tif _, ok := imageResp.Thumbs[\"webp\"]; !ok {\n\t\tt.Fatalf(\"Expected webp thumb, not given\")\n\t}\n\n\timmStore := server.ImageStore\n\tstoreId := imageResp.Hash + \"/webp\"\n\n\texists, err := immStore.Exists(&imagestore.StoreObject{Id: storeId})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error checking if %s exists in in-memory image store: %s\", storeId, err.Error())\n\t}\n\tif !exists {\n\t\tt.Fatalf(\"Expected to find %s in the in-memory storage, instead absent. 
Dump: %+v\", storeId, immStore)\n\t}\n\n\tstoredBodyReader, err := immStore.Get(&imagestore.StoreObject{Id: storeId})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error fetching %s from in-memory image store: %s\", storeId, err.Error())\n\t}\n\tstoredBodyBytes, _ := ioutil.ReadAll(storedBodyReader)\n\n\tif len(storedBodyBytes) == 0 {\n\t\tt.Fatalf(\"Expected webp thumbnail to be larger than 0 bytes\")\n\t}\n\n\tif int64(len(storedBodyBytes)) >= imageResp.Size {\n\t\tt.Fatalf(\"Expected thumbnail to be smaller than original image, %v vs %v\", int64(len(storedBodyBytes)), imageResp.Size)\n\t}\n}\n\nfunc TestGetSizedWebpThumb(t *testing.T) {\n\tcfg := &config.Configuration{\n\t\tMaxFileSize: 99999999999,\n\t\tHashLength:  7,\n\t\tUserAgent:   \"Foobar\",\n\t\tStores:      make([]map[string]string, 0),\n\t\tPort:        8888,\n\t}\n\n\tmemcfg := make(map[string]string)\n\tmemcfg[\"Type\"] = \"memory\"\n\tcfg.Stores = append(cfg.Stores, memcfg)\n\tstats := &DiscardStats{}\n\tserver := NewServer(cfg, imageprocessor.ThumbnailStrategy, stats)\n\tmuxer := http.NewServeMux()\n\tserver.Configure(muxer)\n\tts := httptest.NewServer(muxer)\n\tdefer ts.Close()\n\n\tthumbsJson, _ := json.Marshal(map[string]interface{}{\n\t\t\"webp\": map[string]interface{}{\n\t\t\t\"format\": \"webp\",\n\t\t},\n\t\t\"webpthumb\": map[string]interface{}{\n\t\t\t\"format\": \"webp\",\n\t\t\t\"shape\":  \"custom\",\n\t\t\t\"width\":  10,\n\t\t\t\"height\": 10,\n\t\t},\n\t})\n\n\tvalues := make(url.Values)\n\tvalues.Add(\"image\", b64dan)\n\tvalues.Add(\"thumbs\", string(thumbsJson))\n\n\tres, err := http.PostForm(ts.URL+\"/base64\", values)\n\tif err != nil {\n\t\tt.Fatalf(\"Error when uploading base64 iamge: %s\", err.Error())\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to read response body: %s\", err.Error())\n\t}\n\n\tt.Logf(\"Response to /base64 was: %s\", body)\n\n\tif res.StatusCode != 200 {\n\t\tt.Fatalf(\"Unexpected status code %d\", 
res.StatusCode)\n\t}\n\n\tvar serverResp ServerResponse\n\tvar imageResp ImageResponse\n\terr = json.Unmarshal(body, &serverResp)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error parsing response: %s\", err.Error())\n\t}\n\n\tif !*serverResp.Success {\n\t\tt.Fatalf(\"Uploading image was unsuccessful\")\n\t}\n\n\timageRespBytes, _ := json.Marshal(serverResp.Data)\n\n\terr = json.Unmarshal(imageRespBytes, &imageResp)\n\n\tif len(imageResp.Thumbs) == 0 {\n\t\tt.Fatalf(\"Expected thumbs to contain data, instead blank\")\n\t}\n\n\tif _, ok := imageResp.Thumbs[\"webp\"]; !ok {\n\t\tt.Fatalf(\"Expected webp thumb, not given\")\n\t}\n\n\timmStore := server.ImageStore\n\tstoreId := imageResp.Hash + \"/webp\"\n\tstoreIdSmall := imageResp.Hash + \"/webpthumb\"\n\n\texists, err := immStore.Exists(&imagestore.StoreObject{Id: storeIdSmall})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error checking if %s exists in in-memory image store: %s\", storeIdSmall, err.Error())\n\t}\n\tif !exists {\n\t\tt.Fatalf(\"Expected to find %s in the in-memory storage, instead absent. 
Dump: %+v\", storeIdSmall, immStore)\n\t}\n\n\tstoredBodyReader, err := immStore.Get(&imagestore.StoreObject{Id: storeId})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error fetching %s from in-memory image store: %s\", storeId, err.Error())\n\t}\n\tstoredBodyReaderSmall, err := immStore.Get(&imagestore.StoreObject{Id: storeIdSmall})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error fetching %s from in-memory image store: %s\", storeIdSmall, err.Error())\n\t}\n\tstoredBodyBytes, _ := ioutil.ReadAll(storedBodyReader)\n\tstoredBodyBytesSmall, _ := ioutil.ReadAll(storedBodyReaderSmall)\n\n\tif len(storedBodyBytesSmall) == 0 {\n\t\tt.Fatalf(\"Expected webp thumbnail to be larger than 0 bytes\")\n\t}\n\n\tif len(storedBodyBytesSmall) >= len(storedBodyBytes) {\n\t\tt.Fatalf(\"Expected thumbnail to be smaller than original image, %v vs %v\", len(storedBodyBytesSmall), len(storedBodyBytes))\n\t}\n}\n\nfunc TestTooLarge(t *testing.T) {\n\tcfg := &config.Configuration{\n\t\tMaxFileSize: 99999999999,\n\t\tHashLength:  7,\n\t\tUserAgent:   \"Foobar\",\n\t\tStores:      make([]map[string]string, 0),\n\t\tPort:        8888,\n\t}\n\n\tmemcfg := make(map[string]string)\n\tmemcfg[\"Type\"] = \"memory\"\n\tcfg.Stores = append(cfg.Stores, memcfg)\n\tstats := &DiscardStats{}\n\tserver := NewServer(cfg, imageprocessor.ThumbnailStrategy, stats)\n\tmuxer := http.NewServeMux()\n\tserver.Configure(muxer)\n\tts := httptest.NewServer(muxer)\n\tdefer ts.Close()\n\n\tthumbsJson, _ := json.Marshal(map[string]interface{}{\n\t\t\"webp\": map[string]interface{}{\n\t\t\t\"format\": \"webp\",\n\t\t\t\"shape\":  \"custom\",\n\t\t\t\"width\":  20000,\n\t\t\t\"height\": 20000,\n\t\t},\n\t})\n\n\tvalues := make(url.Values)\n\tvalues.Add(\"image\", b64dan)\n\tvalues.Add(\"thumbs\", string(thumbsJson))\n\n\tres, err := http.PostForm(ts.URL+\"/base64\", values)\n\tif err != nil {\n\t\tt.Fatalf(\"Error when uploading base64 iamge: %s\", err.Error())\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err 
!= nil {\n\t\tt.Fatalf(\"Failed to read response body: %s\", err.Error())\n\t}\n\n\tt.Logf(\"Response to /base64 was: %s\", body)\n\n\tif res.StatusCode != 500 {\n\t\tt.Fatalf(\"Unexpected status code %d\", res.StatusCode)\n\t}\n\n\tvar serverResp ServerResponse\n\terr = json.Unmarshal(body, &serverResp)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error parsing response: %s\", err.Error())\n\t}\n\n\tif *serverResp.Success {\n\t\tt.Fatalf(\"Uploading large image was successful\")\n\t}\n}\n\nfunc TestTooSmall(t *testing.T) {\n\tcfg := &config.Configuration{\n\t\tMaxFileSize: 99999999999,\n\t\tHashLength:  7,\n\t\tUserAgent:   \"Foobar\",\n\t\tStores:      make([]map[string]string, 0),\n\t\tPort:        8888,\n\t}\n\n\tmemcfg := make(map[string]string)\n\tmemcfg[\"Type\"] = \"memory\"\n\tcfg.Stores = append(cfg.Stores, memcfg)\n\tstats := &DiscardStats{}\n\tserver := NewServer(cfg, imageprocessor.ThumbnailStrategy, stats)\n\tmuxer := http.NewServeMux()\n\tserver.Configure(muxer)\n\tts := httptest.NewServer(muxer)\n\tdefer ts.Close()\n\n\tthumbsJson, _ := json.Marshal(map[string]interface{}{\n\t\t\"webp\": map[string]interface{}{\n\t\t\t\"format\": \"webp\",\n\t\t\t\"shape\":  \"custom\",\n\t\t\t\"width\":  0,\n\t\t\t\"height\": 0,\n\t\t},\n\t})\n\n\tvalues := make(url.Values)\n\tvalues.Add(\"image\", b64dan)\n\tvalues.Add(\"thumbs\", string(thumbsJson))\n\n\tres, err := http.PostForm(ts.URL+\"/base64\", values)\n\tif err != nil {\n\t\tt.Fatalf(\"Error when uploading base64 image: %s\", err.Error())\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to read response body: %s\", err.Error())\n\t}\n\n\tt.Logf(\"Response to /base64 was: %s\", body)\n\n\tif res.StatusCode != 500 {\n\t\tt.Fatalf(\"Unexpected status code %d\", res.StatusCode)\n\t}\n\n\tvar serverResp ServerResponse\n\terr = json.Unmarshal(body, &serverResp)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error parsing response: %s\", err.Error())\n\t}\n\n\tif 
*serverResp.Success {\n\t\tt.Fatalf(\"Uploading small image was successful\")\n\t}\n}\n\nfunc TestGetTallThumb(t *testing.T) {\n\tcfg := &config.Configuration{\n\t\tMaxFileSize: 99999999999,\n\t\tHashLength:  7,\n\t\tUserAgent:   \"Foobar\",\n\t\tStores:      make([]map[string]string, 0),\n\t\tPort:        8888,\n\t}\n\n\tmemcfg := make(map[string]string)\n\tmemcfg[\"Type\"] = \"memory\"\n\tcfg.Stores = append(cfg.Stores, memcfg)\n\tstats := &DiscardStats{}\n\tserver := NewServer(cfg, imageprocessor.ThumbnailStrategy, stats)\n\tmuxer := http.NewServeMux()\n\tserver.Configure(muxer)\n\tts := httptest.NewServer(muxer)\n\tdefer ts.Close()\n\n\tthumbsJson, _ := json.Marshal(map[string]interface{}{\n\t\t\"tallthumb\": map[string]interface{}{\n\t\t\t\"shape\":        \"custom\",\n\t\t\t\"crop_gravity\": \"north\",\n\t\t\t\"crop_ratio\":   \"1:2.25\",\n\t\t\t\"max_width\":    10,\n\t\t},\n\t})\n\n\tvalues := make(url.Values)\n\tvalues.Add(\"image\", b64dan)\n\tvalues.Add(\"thumbs\", string(thumbsJson))\n\n\tres, err := http.PostForm(ts.URL+\"/base64\", values)\n\tif err != nil {\n\t\tt.Fatalf(\"Error when uploading base64 iamge: %s\", err.Error())\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to read response body: %s\", err.Error())\n\t}\n\n\tt.Logf(\"Response to /base64 was: %s\", body)\n\n\tif res.StatusCode != 200 {\n\t\tt.Fatalf(\"Unexpected status code %d\", res.StatusCode)\n\t}\n\n\tvar serverResp ServerResponse\n\tvar imageResp ImageResponse\n\terr = json.Unmarshal(body, &serverResp)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error parsing response: %s\", err.Error())\n\t}\n\n\tif !*serverResp.Success {\n\t\tt.Fatalf(\"Uploading image was unsuccessful\")\n\t}\n\n\timageRespBytes, _ := json.Marshal(serverResp.Data)\n\n\terr = json.Unmarshal(imageRespBytes, &imageResp)\n\n\tif len(imageResp.Thumbs) == 0 {\n\t\tt.Fatalf(\"Expected thumbs to contain data, instead blank\")\n\t}\n\n\tif _, ok := 
imageResp.Thumbs[\"tallthumb\"]; !ok {\n\t\tt.Fatalf(\"Expected cropped thumb, not given\")\n\t}\n\n\timmStore := server.ImageStore\n\tstoreId := imageResp.Hash\n\tstoreIdSmall := imageResp.Hash + \"/tallthumb\"\n\n\texists, err := immStore.Exists(&imagestore.StoreObject{Id: storeIdSmall})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error checking if %s exists in in-memory image store: %s\", storeIdSmall, err.Error())\n\t}\n\tif !exists {\n\t\tt.Fatalf(\"Expected to find %s in the in-memory storage, instead absent. Dump: %+v\", storeIdSmall, immStore)\n\t}\n\n\tstoredBodyReader, err := immStore.Get(&imagestore.StoreObject{Id: storeId})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error fetching %s from in-memory image store: %s\", storeId, err.Error())\n\t}\n\tstoredBodyReaderSmall, err := immStore.Get(&imagestore.StoreObject{Id: storeIdSmall})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error fetching %s from in-memory image store: %s\", storeIdSmall, err.Error())\n\t}\n\tstoredBodyBytes, _ := ioutil.ReadAll(storedBodyReader)\n\tstoredBodyBytesSmall, _ := ioutil.ReadAll(storedBodyReaderSmall)\n\n\tif len(storedBodyBytesSmall) == 0 {\n\t\tt.Fatalf(\"Expected webp thumbnail to be larger than 0 bytes\")\n\t}\n\n\tif len(storedBodyBytesSmall) >= len(storedBodyBytes) {\n\t\tt.Fatalf(\"Expected thumbnail to be smaller than original image, %v vs %v\", len(storedBodyBytesSmall), len(storedBodyBytes))\n\t}\n}\n\nvar (\n\tb64gif = \"R0lGODlhAQABAIAAAAAAAP\" + \"/\" + \"/\" + \"/yH5BAEAAAAALAAAAAABAAEAAAIBRAA7\"\n\tb64dan = 
\"iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAIAAACRXR/mAAAWAUlEQVRYw7V5eZBdV3nnd87dl/fu23tfXneru9Wtllp7y8Y2MpJtIMYTiBkYMFAZCFtqKiQMJjOkUsNUYJIKqRomhRNCGDAwsROQDHiRN2HtUktqSd3qVu/L63799v3d/d5z5g9M4mQIIn/Mr84ft26duvWr7/6+3/d956ADCbXumI6HHAwxzPgezjkucAAuSBgrLHIJbfhIlmTHtVxZ5m2L1vVEQnN1iwNbFGG9BG1BzrM9cGmZsm97+D0PffCJncN9mXTlmW/+T7c6wyP+8U89OXbgYLWqZzcKQ2GobLyUWr2Smr1dmzdOrkJ/r7RH9nMlYhRB0mDwoMBu5I0CkC4Gt/lkDfx9HeEhx8o16QUHAODxLjZS9TcNKji2KsN8wWyLQNdupdEw1SC0JyNr9fZQf2h3f9ys6cRzM47/xT/+w/GJAwAAAD2wdOr7G7Fd9/zWRz7y8zdwD9y+NvXs0yupSt2xpG4O1CCe2BOP+3nkGhtZulKC3FkPCQAMQBJAAagDjPHQF4JUHk4CWABDAKMAFACxQDzo1SB5fBfpHipnq7prarFIIBD1AvFgS7vTqNSzq+n0qsMnDk3cGwlzyWhcdZscD7NZ9Oh/+B0AeOW11774X5+8MTkFrMhFE67VCGh8F891dqgV30rNpVglxEVakd9EHbzcyZBNijlZe3skrCbEYCTou2wJy54UCoXY4bYWnuGoLAa1cHVz7vat01hCpbU1IYBFAeyMGeofNoPxeqMc0VTbIOupYjq9Fg2HRoYH3nH8Hb/5H5+8fGVq8vz5l09ffPHFF+FXIxgIaFpQo+jcqec4CRtYSqgBUWAdhuOAQwIVMQ7wgmW6hXrFcizXrqfXZ//+r34wf/H2vl2gRvHaGnGakOyBloGWConrdnmgs7Vvx0EDeOrwum5PvvoPRcx/8MOfeOmF888+/wr8W8BWrVw52/Sb1uX8FmWobdBKveJ7nqnbmWqD4aQWlW9VeFmprk5fmztfCwCEYyg2mIR4LLu5LcR1XlL29fTNr8nXbywin91/5OjSypZu1ro6drz8o0vb6392/3jfeK+6st5s/JqcWJb9vc/9yWah4NkucT1AKK5E69RByJFZrq67QVW6bywZijaLc9nMSv3h4+HMagUwyyC5a7CTC8vVdG7rVioYLFK3/vLVxrmpa3+a6A9Ijf/1F8+7oXiyNXQ1XZKFzYO7Yn0KXW6o06ncr8OMYao11vFkQjQACSDGCmGOa3i2iKGXwKG4lGypBGTXs3yGcXqGI1pbBAtSJN5HefnqhctzF1NLW87klU270HSBSRvuG9duW6u5etFbqDXzTZMCbFWs8b6OUI9/qC+6smHXbPtXc8IYM+0sGyBEABjBOIDQtGMqthHzfNd2w8Rrj1mBKAlH1Y07teK2nxyNhxN929laoVgqZtdIOe+UQAKQGcjVvYbjcQA1F3Ilmye0DEAAAEBgICDYbKh1ICZgYt3e1O9Ki816XgwgjhHBKOVTAUAAaAfYI2G2g5nMunjTTChbmRK0akg3vOzmAkeF7r62AM8FhsaU3wpIctBxvUyxsbGQqtQKOm7OXK+U6qACEAAXQKNQqdQHhdaiZQeDXkcikM7fRWZsLKTRaq1OYdXzEUAPQAeDNBnCIrOccZs+H6BQKLmqSrV2UNTWh4/e19bWHolpWjAKiAVVBlUEwwSLgmlvbiwuL90+r5wplO1s0ZxZKhUphGRoT3YDW7s+v8SaVJPVNNyFFvrvv/8H/+MvvsYCSABhgCEBNUW8YvmjAfAZJmuyySA5cF9PKBGPReOHJw51j+wBOQDIB5YDH0PTAUSBw0B4MDwwDDe3nc5texjrHj19/szZy1fEWBQLXDqzvr3myCxeb+Ca5f3qTGQ+9sQnGeKsLC/1A/SyKCzD2QbZ9uDe+ztKDXMh64wPsAce7Bkd2L97566OjnbgRfApeDYl1P
d85ANQjmIBYZG4PvI8RpRDoYgmy4FgYGjXrlhLtFYqbS4vrs05rIM8h6Ztcldt4R+/fn7i+LvG+rqbAADwsyYtEohFg70j/Rww4zK0tDEUeIFHImAgDLFdIEB9FvkMSxkAQAgwUJ/YwIPPeMQ1wPbBcp1q1a/ooz1jw8O7tIDWGcetEdrVGUGYubtB1E2fATR+YP/Jy5fuEKhSAICBHa0T/ZH1S6uEUE3zdu8Z2dk/Gg5HAFEQeRAVQCyiCAgABd/zgTIMiMhnkeMgy0KW6zu27/u1aq3eqKvxUL1p5CpbSlg5eGCwYaFMsX6XTGzt7jO4sNK5+9O/+0c3V2dyG2vLs7dc26ivlfIV4mGQAmyAESJyCHiGUh8BAkQopr7lYsogj0WOB8igPEtZFrM8YI74LkEMweBjQomjuHxPx8D06jaDLYE6CYnc3ei//tdPeTwLlh8Vgw7TzG9vPvOtb65Nff/69HwVoF+CiByIaZ3ACmA4LkJYRSzlkEsRAt8njONgDyNOIoQS8BFlwWWJ71LfIb5DiccyrOuavGAO7+lcnp2fvjabrvwa9eepr35ho5DhbSIyaslt+qwwkoh0ydyNO80GAEIwtvPQYHIMPB9cn5VkjDhKKEgClhTsM2AB8VkCPDYNxjURyxCFQxbn2k3XchiEAVOf4ZpG06OWEgnnUoV81bs7re8//fTPn/btPvy+J35zZOLYM1//+uqNCmtDfyT4zo/enxwZBs8AJQAiwhwDjoUoARcD9SHWC4FWDED9BhgqNXXADsIUOy7ne5zvc4RFxPZ9h5q0spFmFT6eDLP5Clh3YcYghGNAKUChWWcQRNq6NaM+1JiLuxbPK7/3t3/ZOfFuYCmmpJHOLy+s6RWTIWxuaev26+ezM7eM4pKMDY7nUSAASgBhhGwKCLOixFBkOMT2kNVwc8Xs1JWb2Y2iHMGZKlNtunepiQKDDEo5ANN1lheXJq9Mf2C87zOPPuz6mfmtVEB2enYOL3n84x/7428883pH745gYuDqzZWFjXy64ly9Pp1NrQ+0tahxBZwmNSiql0gh69sOViRWFBHDGq7XqJQKta0LF1ZqJdqawNcWTY/+qlARAqztEwD4x+JpV3OdIZl91/F+PdVRTw/GYvjSy0bOOHfxat/Yvg+959j01KTPu6HRZKaQQSWlYfPf+D8/vWd24eHPfgozHNQbhlVaW1xNl7KW54mSghhEVV/EIb2ByizcSUuWb91NWoT97Oe+pPKSTxESEctIgmP0D3WA0dxc26Ieam+Jy1gaC7m/f9/YkmH+5AffW5y68UalVI0mHjw0HrQLnGupQe3ET85+5+zSpz/0GG83N9cz+cz6rdVlD/k7h3clNA3zni1DgyXA8pZu8wDO/0NE5tnDY8n9XWxMtbs72llVE4DaEiOV641CIRv16uoD/ZAxSltmJJgI8xJIIXli5+fd0qnT17tHR0cP7e9YTb987dbRiXeUlBvlOzcfOjJuiotf/ObzJ55/1fHsw6Ndh3cPR3tHo4Egdt1SUV9ZmBYVyxFxT7v87B+9jzQbnIyuzpl/9t3J3h3tQcH/jaPjxw4llMEIMBY0V8EDBAAYQAAwAQCgFcHqc38jde0oVRt3rvxob3+f0jUIg+1Gc3P2J5c6kmMoIkhiq2kahfU0sYyx0eRT3/mbp3/66k9//Mrc6Rsf+MKTx963XwzHrAYNE7ZL5USB5Ot6qWzeXssFYjgZk3zbjEdV1xQuTW1/+9tf+7u//cbK6oLjmQ29qdvIMF2GBRYA4mrQsAzwPACIK5KkaSCZQkPv6UtShgDySd2SE4OD98vbc8t2gdioIIakYJBN7j9o68a5c1e+/Aefb9k/Ri7efPdI/8MPHs9W9af++sTQQGdhbWmwt3380EO+oK6ffO3UGxff+uN4gIOPfybWvtOhw65HeVHlFU6V5IAqswDgMmj/oSO3V1Yc29nVOwJCAIZ7zMKVejEb7e4E8LBDoWRobQktGKzenM+tba
bXa1mwlu/c2sxkPv6pjx//d4/C2uxaPt10zI5Ix0hcaPtkhJfxjclXeAGvzC1mGm7Va4wd2LG+WtOrZUI8DqMgB8VS8dO/++jgyO5SvgCEEs8HzyHUZYMcX67VQuHQM8+enN/WtXoDqgR8UUbYwQQRDJZNbRsQCwQQFkN794YG9uzwGIdzyht39uwaTewZhfI2WIwqcpi6J5/9+wceOtaZTBBdf/s9j5ieszR/e/n6qrxD//JXDz75sdM3L3kA4BHKMQwD/lf/2xd+icuHMHYAXjj9ajafrTm0US6dDEr/6bF3huSmuCMKIADhiNFAHsKiSiUBBYMQEjBFottssSuM34D1BUpZ5EkVZD34zqNmoTlz9qIUUGReFJRg0dSNOg7JdmsyPbeUKxffNFIK4LOYgP9WH+U4FjM8L4hon6JYur78i6QVpICNpBYj/+UnDr3r/e+NYk3kBBA5pGhEkKmsMkoQmk0ztW4Uc2FZwAJQxkNaAkre6TM/K7jWIw+826zXbs3ebBiO5/g5vVi1nKWV21YyvVGEa6+96aT7x/fJUDvy4CNvf/f7zWZNZXmGUgZhiihBhI3ynOWxImA70j6b27Qd+7HH/n2Pmg/0EKteMFWecQjveZRhMYNBJ9BompmMYdVNu1reLPT2Jbkd/dC+A8SmzFzlmjbSQq0tLYrIluoNt2bnm4WN8srrb9SvTHKEYQAsAARAtjPbbSqbWl372cs/rVcrjmWahmmalmMbruewPMuSUBD52JNkFiHGd3584lsffehg575dlVI1KLW7iCLHQTWdIYyNDbNepz5Ed+3yfPu5P//qC8+f+PDn/0usZw9wTTPAeXUWAAPDUMwC4Lpj5xq6UfMV23aog3wW4M1oZXJZzo9PPfciwIu/rJ2nkNNtTRJtp8EDdAG3AG5bTBF5tlgzEgg88ASGCyqiA262USccam1JEMdgFem9v/2Jcy+8dOrET7SL19aW1y+8fu3Yex7hRR4wBpYjNp1fXCoh/ZNf+dLQg8dPfeCzlHq/0BUAAIu9Vk3J1vRfMmLsVaWZSq1umh4Q10MRIvtgPfmRhx1S3a42A1rMJS5hGTUY4hTZsu3FteWt9JamBTy9ks5vqx1dJcc7fea1wvbmYM/w0eMPd/Z02LWyrjeyuULJM8aPTnQO9/WOH1ad8sZqpqMlsjPZHlKkbLnOOSRnWL+0g0Af701c36hsU9cGMBEjU/quwzu+/7XPvXLmhcm5jaMPvFNTRODEeKK9pbPTNvQf/vhEZmWzXQtF4iEhGjYZQVKjlmcsLc3cN/a2gVgftpuOZ+X12ura+ts++qFAWC3Nzwf6h3h9izIYxdsBePCslZV8eX7x7PnL333u9Mzy+r+MVkJifFaNaEGPxUXLsoB+5XceHdw30ijU55YWDddQlCAlgDheVoJ1s9HZ0fnAvQ96q3kmaxpFo7qZpbpTadZdjrWw0tB14jc4TFMrGy3jw117j29dejE80C+EOt3SNNM+BBAFCACOR2KJjuGBex5626c/8V7Vdl69eOOf9VvbFtkwGim9WbVtAHjiyPh//sR7kdXEjHJncW4lteAQQAyrhLXtXCaf3e5p61S7Eh29nd2dgzvah2J8zMs3lq5c3rqzWKrWG77OqgjZDgjC3vd/1GimK9tbiZ1H3Nw6iMDI/ZWl2frKTcvKOnqeVwWEWeDIPRO7n3/xTCZX+qfJx6CUE/juSPjYvqHHDiR/4/j9EIuQQjHRoo3uHLn1o2kfZpouqXpOOBQhlp+PFoOtcd+upApb5YyZULt6BgYdzu1HjtARrXk1RQ7WK/W20UFAofLsq6GeAYAA6EWuqw1AWLsxOXX5tSMTD4wemADP8zzMmDqicHT/yPXpxbccJGH45p88+Z3vfuU9x0YG9yaBZYBBoKrUo6Fo9M7s1fnZUi6fohwfjsSIy2lKqLWrHTFsrlS5MnNjKrdSkJ1gT1vrQH9Xb3ssEiIeUy7WBsd3SyEuNTnZtnMYcwzZXm
baOozs9g/+/E8FzLfGu9xijkEmDcZYPgTlRqVa8DwS12KpbJYCMACQzxcef8eEsGMnNGsUiwQzIIrAcaqsdHX0TF+7klp3edV0/CamcjiktcdVJhSOtXZHRJVwAhsUtVDQMW3HbHiELK+mbl29MX7wkJpQVmbmu4eSeq30o6f+crAjKoZaL516OZXfdH2IKGLbkXEh1Is4GSHXdY1b567WGv5GMQ+EMCzLpjL57z370sTBA117HkGKDxWLADDRIPFoRI5E4m03rl5KbZoGykk8p2mBUiUXYCVZCtqOjxkuEg5zLItZdiuXX9lcn5yc+vaJM/GgenDP3q217UBIDLT3fOkzf7gwdfvY+96vBRI5veEa1eG+rpa9+4ALALDAgVmtn79w89b8FisIkUCAwRgTQhqm9e2n/4Ga+sTEvXx7D8Y+cQiOqmD7sXA83huau3F7btqvoRqhiBKlYjvlhrmRz20WNijD5HK1uZWbs6uzz710eT3V0Hlt5c7tT33wCZ7wSwtz3XsONzO5xampYxNHJCVqmH6zlu1pDSZ2DFApiEAE5CPDmLmzAASN9LfuHmx/k9bPhXbmwuUfPP1D5Dm7BncIbYPASkjlgKBEuK0/mUhtLF+eqS8v5zzL9HFjcXOjWK8rslgqlG/MzLx+9o3XLszNbzZ04LRgMJXe/Nxvf5BUGhdOvbr37Q8mRCGzMsPyvGV6iFcYzxob7pF6O5AQoiAjBAKQW9dvXbhwJSzrvlP6Z7QAoNponnrt3HeePpnf3EqIXGtvkon0SiwvMELnzphs5lcXSpObxVvTqzeXVuZX11fW1+fuTJ+5Mju9pusWAIBtm5VqyaFQS904e/LvGIru/cCHjdSm4dlSJCAqku84gtsc6u9EEY3KUYAwohS53vydhe/98PnsVnkjXflHWuitPts0zQuTU3/19A8nX59s5NMBVYy3dPa0Dyf7e6Ihm9XLhaxd0UmlbKW3aqvbes0kgMWAojmO+eYkoygWLQyNJjuTfZXZW2XbHTxyz8joztZgUK9sdUQCQS1MNA2pLQgkAIfqte18bnHxcrZhi7KEWJb1vLuN3hgf3jt634GJZLJHZJukuVxaX8wsZ5fmizeKJP3z2xeEMca+/+an9uzf87+/9bXzJ57tjgUZz9+5ZyIUaZUwlRXs17axUUcBBdp7INYPOAhWvZlLT96aOnvhnGm7iiCyv84puU/IxeszF6/PAEAsEu5oi8sigibK2GjrF3soJb7/T2JYvLOgqO3DYwdlu3zvkXvAg3pBB4GHkMhoMcBANRGAIs8CXgUPOSZwgixJmkvqDub+pbbuCsO0coXSVqa0VdKrzr86tHuud+naNcwxMUyTLWHMghgIMCKPFIHaBjCIRkLASUhQAfNgOrVaEQRhPbU9uzyPOIahlML/H2S20ucvXM2ks/eO9Ye0AIOA2Cb2bERMhAHkADAqsAryXNqoWnZTCAXqzeby4qLEi4woipRShmHwvw3MW9a/AoQIhfV8eX1zuzvZZeuG3TQdQ3dtCwPHMCJGLKKU2o5vmJwoEl40LW/hzrypm/8XZCy0eCnDy+0AAAAASUVORK5CYII=\"\n)\n"
  },
  {
    "path": "server/stats.go",
    "content": "package server\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com/PagerDuty/godspeed\"\n)\n\ntype RuntimeStats interface {\n\tLogStartup()\n\n\tRequest(url string)\n\tResponseTime(elapsed time.Duration, url string)\n\tThumbnail(name string)\n\tUpload(source string)\n\tError(code int)\n}\n\ntype DiscardStats struct{}\n\nfunc (d *DiscardStats) LogStartup()                                    {}\nfunc (d *DiscardStats) Request(url string)                             {}\nfunc (d *DiscardStats) ResponseTime(elapsed time.Duration, url string) {}\nfunc (d *DiscardStats) Thumbnail(name string)                          {}\nfunc (d *DiscardStats) Upload(source string)                           {}\nfunc (d *DiscardStats) Error(code int)                                 {}\n\ntype DatadogStats struct {\n\tdog *godspeed.Godspeed\n}\n\nfunc NewDatadogStats(datadogHost string) (*DatadogStats, error) {\n\tvar ip net.IP = nil\n\tvar err error = nil\n\n\t// Assume datadogHost is an IP and try to parse it\n\tip = net.ParseIP(datadogHost)\n\n\t// Parsing failed\n\tif ip == nil {\n\t\tips, _ := net.LookupIP(datadogHost)\n\n\t\tif len(ips) > 0 {\n\t\t\tip = ips[0]\n\t\t}\n\t}\n\n\tif ip != nil {\n\t\tgdsp, err := godspeed.New(ip.String(), godspeed.DefaultPort, false)\n\t\tif err == nil {\n\t\t\treturn &DatadogStats{gdsp}, nil\n\t\t}\n\t}\n\n\treturn nil, err\n}\n\nfunc (d *DatadogStats) LogStartup() {\n\td.dog.Incr(\"mandible.startup\", nil)\n}\n\nfunc (d *DatadogStats) Request(url string) {\n\ttag := fmt.Sprintf(\"url:%s\", url)\n\n\td.dog.Incr(\"mandible.request\", []string{tag})\n}\n\nfunc (d *DatadogStats) ResponseTime(elapsed time.Duration, url string) {\n\ttime := elapsed.Seconds()\n\ttag := fmt.Sprintf(\"url:%s\", url)\n\n\td.dog.Timing(\"mandible.responseTime\", time, []string{tag})\n}\n\nfunc (d *DatadogStats) Thumbnail(name string) {\n\ttag := fmt.Sprintf(\"size:%s\", name)\n\n\td.dog.Incr(\"mandible.thumbnail\", []string{tag})\n}\n\nfunc (d 
*DatadogStats) Upload(source string) {\n\ttag := fmt.Sprintf(\"source:%s\", source)\n\n\td.dog.Incr(\"mandible.upload\", []string{tag})\n}\n\nfunc (d *DatadogStats) Error(code int) {\n\ttag := fmt.Sprintf(\"code:%d\", code)\n\td.dog.Incr(\"mandible.error\", []string{tag})\n}\n"
  },
  {
    "path": "uploadedfile/thumbfile.go",
    "content": "package uploadedfile\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\n\t\"github.com/Imgur/mandible/imageprocessor/processorcommand\"\n\t\"github.com/Imgur/mandible/imageprocessor/thumbType\"\n)\n\nvar (\n\tdefaultQuality   = 83\n\tmaxImageSideSize = 10000\n)\n\ntype ThumbFile struct {\n\tlocalPath string\n\n\tName          string\n\tWidth         int\n\tMaxWidth      int\n\tHeight        int\n\tMaxHeight     int\n\tShape         string\n\tCropGravity   string\n\tCropWidth     int\n\tCropHeight    int\n\tCropRatio     string\n\tQuality       int\n\tFormat        string\n\tStoreURI      string\n\tDesiredFormat string\n\tNoStore       bool\n}\n\nfunc NewThumbFile(width, maxWidth, height, maxHeight int, name, shape, path, cropGravity string, cropWidth, cropHeight int, cropRatio string, quality int, desiredFormat string, noStore bool) *ThumbFile {\n\tif quality == 0 {\n\t\tquality = defaultQuality\n\t}\n\n\treturn &ThumbFile{\n\t\tlocalPath: path,\n\n\t\tName:          name,\n\t\tWidth:         width,\n\t\tMaxWidth:      maxWidth,\n\t\tHeight:        height,\n\t\tMaxHeight:     maxHeight,\n\t\tShape:         shape,\n\t\tCropGravity:   cropGravity,\n\t\tCropWidth:     cropWidth,\n\t\tCropHeight:    cropHeight,\n\t\tCropRatio:     cropRatio,\n\t\tQuality:       quality,\n\t\tFormat:        \"\",\n\t\tStoreURI:      \"\",\n\t\tDesiredFormat: desiredFormat,\n\t\tNoStore:       noStore,\n\t}\n}\n\nfunc (this *ThumbFile) GetNoStore() bool {\n\treturn this.NoStore\n}\n\nfunc (this *ThumbFile) SetPath(path string) error {\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\treturn errors.New(fmt.Sprintf(\"Error when creating thumbnail %s\", this.Name))\n\t}\n\n\tthis.localPath = path\n\n\treturn nil\n}\n\nfunc (this *ThumbFile) GetPath() string {\n\treturn this.localPath\n}\n\nfunc (this *ThumbFile) GetOutputFormat(original *UploadedFile) thumbType.ThumbType {\n\tif this.DesiredFormat != \"\" {\n\t\treturn 
thumbType.FromString(this.DesiredFormat)\n\t}\n\n\treturn thumbType.FromMime(original.GetMime())\n}\n\nfunc (this *ThumbFile) ComputeWidth(original *UploadedFile) int {\n\twidth := this.Width\n\n\toWidth, _, err := original.Dimensions()\n\tif err != nil {\n\t\treturn 0\n\t}\n\n\tif this.MaxWidth > 0 {\n\t\twidth = int(math.Min(float64(oWidth), float64(this.MaxWidth)))\n\t}\n\n\treturn width\n}\n\nfunc (this *ThumbFile) ComputeHeight(original *UploadedFile) int {\n\theight := this.Height\n\n\t_, oHeight, err := original.Dimensions()\n\tif err != nil {\n\t\treturn 0\n\t}\n\n\tif this.MaxHeight > 0 {\n\t\theight = int(math.Min(float64(oHeight), float64(this.MaxHeight)))\n\t}\n\n\treturn height\n}\n\nfunc (this *ThumbFile) ComputeCrop(original *UploadedFile) (int, int, error) {\n\tre := regexp.MustCompile(\"(.*):(.*)\")\n\tmatches := re.FindStringSubmatch(this.CropRatio)\n\tif len(matches) != 3 {\n\t\treturn 0, 0, errors.New(\"Invalid crop_ratio\")\n\t}\n\n\twRatio, werr := strconv.ParseFloat(matches[1], 64)\n\thRatio, herr := strconv.ParseFloat(matches[2], 64)\n\tif werr != nil || herr != nil {\n\t\treturn 0, 0, errors.New(\"Invalid crop_ratio\")\n\t}\n\n\tvar cropWidth, cropHeight float64\n\n\tif wRatio >= hRatio {\n\t\twRatio = wRatio / hRatio\n\t\thRatio = 1\n\t\tcropWidth = math.Ceil(float64(this.ComputeHeight(original)) * wRatio)\n\t\tcropHeight = math.Ceil(float64(this.ComputeHeight(original)) * hRatio)\n\t} else {\n\t\thRatio = hRatio / wRatio\n\t\twRatio = 1\n\t\tcropWidth = math.Ceil(float64(this.ComputeWidth(original)) * wRatio)\n\t\tcropHeight = math.Ceil(float64(this.ComputeWidth(original)) * hRatio)\n\t}\n\n\treturn int(cropWidth), int(cropHeight), nil\n}\n\nfunc (this *ThumbFile) Process(original *UploadedFile) error {\n\tswitch this.Shape {\n\tcase \"circle\":\n\t\treturn this.processCircle(original)\n\tcase \"thumb\":\n\t\treturn this.processThumb(original)\n\tcase \"square\":\n\t\treturn this.processSquare(original)\n\tcase \"custom\":\n\t\treturn 
this.processCustom(original)\n\tdefault:\n\t\treturn this.processFull(original)\n\t}\n}\n\nfunc (this *ThumbFile) String() string {\n\treturn fmt.Sprintf(\"Thumbnail of <%s>\", this.Name)\n}\n\nfunc (this *ThumbFile) processSquare(original *UploadedFile) error {\n\tif this.Width == 0 {\n\t\treturn errors.New(\"Width cannot be 0\")\n\t}\n\tif this.Width > maxImageSideSize {\n\t\treturn errors.New(\"Width too large\")\n\t}\n\n\tfilename, err := processorcommand.SquareThumb(original.GetPath(), this.Name, this.Width, this.Quality, this.GetOutputFormat(original))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := this.SetPath(filename); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (this *ThumbFile) processCircle(original *UploadedFile) error {\n\tif this.Width == 0 {\n\t\treturn errors.New(\"Width cannot be 0\")\n\t}\n\tif this.Width > maxImageSideSize {\n\t\treturn errors.New(\"Width too large\")\n\t}\n\n\t//Circle thumbs should always be PNGs\n\toutputFormat := thumbType.FromString(\"png\")\n\n\tfilename, err := processorcommand.CircleThumb(original.GetPath(), this.Name, this.Width, this.Quality, outputFormat)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := this.SetPath(filename); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (this *ThumbFile) processThumb(original *UploadedFile) error {\n\tif this.Width == 0 {\n\t\treturn errors.New(\"Width cannot be 0\")\n\t}\n\tif this.Width > maxImageSideSize {\n\t\treturn errors.New(\"Width too large\")\n\t}\n\tif this.Height == 0 {\n\t\treturn errors.New(\"Height cannot be 0\")\n\t}\n\tif this.Height > maxImageSideSize {\n\t\treturn errors.New(\"Height too large\")\n\t}\n\n\tfilename, err := processorcommand.Thumb(original.GetPath(), this.Name, this.Width, this.Height, this.Quality, this.GetOutputFormat(original))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := this.SetPath(filename); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (this *ThumbFile) 
processCustom(original *UploadedFile) error {\n\tcropWidth := this.CropWidth\n\tcropHeight := this.CropHeight\n\tvar err error\n\n\tif this.CropRatio != \"\" {\n\t\tcropWidth, cropHeight, err = this.ComputeCrop(original)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\twidth := this.ComputeWidth(original)\n\theight := this.ComputeHeight(original)\n\tvalidWidth := width > 0 && width <= maxImageSideSize\n\tvalidHeight := height > 0 && height <= maxImageSideSize\n\n\tif !validWidth && !validHeight {\n\t\tif !validWidth {\n\t\t\treturn errors.New(\"Invalid width\")\n\t\t}\n\n\t\treturn errors.New(\"Invalid height\")\n\t}\n\n\tfilename, err := processorcommand.CustomThumb(original.GetPath(), this.Name, width, height, this.CropGravity, cropWidth, cropHeight, this.Quality, this.GetOutputFormat(original))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := this.SetPath(filename); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (this *ThumbFile) processFull(original *UploadedFile) error {\n\tfilename, err := processorcommand.Full(original.GetPath(), this.Name, this.Quality, this.GetOutputFormat(original))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := this.SetPath(filename); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "uploadedfile/uploadedfile.go",
    "content": "package uploadedfile\n\nimport (\n\t\"errors\"\n\t\"image\"\n\t\"image/gif\"\n\t\"image/jpeg\"\n\t\"image/png\"\n\t\"net/http\"\n\t\"os\"\n)\n\ntype UploadedFile struct {\n\tfilename string\n\tpath     string\n\tmime     string\n\thash     string\n\tocrText  string\n\tthumbs   []*ThumbFile\n}\n\nvar supportedTypes = map[string]bool{\n\t\"image/jpeg\": true,\n\t\"image/jpg\":  true,\n\t\"image/gif\":  true,\n\t\"image/png\":  true,\n}\n\nfunc NewUploadedFile(filename, path string, thumbs []*ThumbFile) (*UploadedFile, error) {\n\tfile, err := os.Open(path)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuff := make([]byte, 512) // http://golang.org/pkg/net/http/#DetectContentType\n\t_, err = file.Read(buff)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfiletype := http.DetectContentType(buff)\n\n\tif _, ok := supportedTypes[filetype]; !ok {\n\t\treturn nil, errors.New(\"Unsupported file type!\")\n\t}\n\n\treturn &UploadedFile{\n\t\tfilename,\n\t\tpath,\n\t\tfiletype,\n\t\t\"\",\n\t\t\"\",\n\t\tthumbs,\n\t}, nil\n}\n\nfunc (this *UploadedFile) GetFilename() string {\n\treturn this.filename\n}\n\nfunc (this *UploadedFile) SetFilename(filename string) {\n\tthis.filename = filename\n}\n\nfunc (this *UploadedFile) GetHash() string {\n\treturn this.hash\n}\n\nfunc (this *UploadedFile) SetHash(hash string) {\n\tthis.hash = hash\n}\n\nfunc (this *UploadedFile) GetOCRText() string {\n\treturn this.ocrText\n}\n\nfunc (this *UploadedFile) SetOCRText(text string) {\n\tthis.ocrText = text\n}\n\nfunc (this *UploadedFile) SetPath(path string) {\n\t// TODO: find a better location for this\n\tos.Remove(this.path)\n\n\tthis.path = path\n}\n\nfunc (this *UploadedFile) GetPath() string {\n\treturn this.path\n}\n\nfunc (this *UploadedFile) GetMime() string {\n\treturn this.mime\n}\n\nfunc (this *UploadedFile) SetMime(mime string) {\n\tthis.mime = mime\n}\n\nfunc (this *UploadedFile) SetThumbs(thumbs []*ThumbFile) {\n\tthis.thumbs = thumbs\n}\n\nfunc (this 
*UploadedFile) GetThumbs() []*ThumbFile {\n\treturn this.thumbs\n}\n\nfunc (this *UploadedFile) FileSize() (int64, error) {\n\tf, err := os.Open(this.path)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tstats, err := f.Stat()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tsize := stats.Size()\n\n\treturn size, nil\n}\n\nfunc (this *UploadedFile) Clean() {\n\tos.Remove(this.path)\n\n\tfor _, thumb := range this.thumbs {\n\t\tos.Remove(thumb.GetPath())\n\t}\n}\n\nfunc (this *UploadedFile) Dimensions() (int, int, error) {\n\tf, err := os.Open(this.path)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tvar cfg image.Config\n\tswitch true {\n\tcase this.IsGif():\n\t\tcfg, err = gif.DecodeConfig(f)\n\tcase this.IsPng():\n\t\tcfg, err = png.DecodeConfig(f)\n\tcase this.IsJpeg():\n\t\tcfg, err = jpeg.DecodeConfig(f)\n\tdefault:\n\t\treturn 0, 0, errors.New(\"Invalid mime type!\")\n\t}\n\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\treturn cfg.Width, cfg.Height, nil\n}\n\nfunc (this *UploadedFile) IsJpeg() bool {\n\treturn (this.GetMime() == \"image/jpeg\" || this.GetMime() == \"image/jpg\")\n}\n\nfunc (this *UploadedFile) IsPng() bool {\n\treturn this.GetMime() == \"image/png\"\n}\n\nfunc (this *UploadedFile) IsGif() bool {\n\treturn this.GetMime() == \"image/gif\"\n}\n"
  },
  {
    "path": "vendor/github.com/PagerDuty/godspeed/.gitignore",
    "content": "# Misc\n*.swp\n\n# Compiled Object files, Static and Dynamic libs (Shared Objects)\n*.o\n*.a\n*.so\n\n# Folders\n_obj\n_test\n\n# Architecture specific extensions/prefixes\n*.[568vq]\n[568vq].out\n\n*.cgo1.go\n*.cgo2.c\n_cgo_defun.c\n_cgo_gotypes.go\n_cgo_export.*\n\n_testmain.go\n\n*.exe\n*.test\n*.prof\n"
  },
  {
    "path": "vendor/github.com/PagerDuty/godspeed/.travis.yml",
    "content": "language: go\ngo:\n  - 1.5.3\nbranches:\n  only:\n    - master\nscript: go test -v ./... -check.vv\nsudo: false\n"
  },
  {
    "path": "vendor/github.com/PagerDuty/godspeed/LICENSE",
    "content": "Copyright (c) 2014-2015, PagerDuty Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n3. Neither the name of PagerDuty nor the names of its contributors may be used\n   to endorse or promote products derived from this software without specific\n   prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL PagerDuty OR CONTRIBUTORS BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "vendor/github.com/PagerDuty/godspeed/README.md",
    "content": "# Godspeed\n[![TravisCI Build Status](https://img.shields.io/travis/PagerDuty/godspeed/master.svg?style=flat)](https://travis-ci.org/PagerDuty/godspeed)\n[![GoDoc](https://img.shields.io/badge/godspeed-GoDoc-blue.svg?style=flat)](https://godoc.org/github.com/PagerDuty/godspeed)\n[![License](https://img.shields.io/badge/License-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/PagerDuty/godspeed/blob/master/LICENSE)\n\nGodspeed is a statsd client for the Datadog extension of statsd (DogStatsD).\nThe name `godspeed` is a bit of a rhyming slang twist on DogStatsD. It's also a\npoke at the fact that the statsd protocol's transport mechanism is UDP...\n\nCheck out [GoDoc](https://godoc.org/github.com/PagerDuty/godspeed) for the docs\nas well as some examples.\n\nDogStatsD is a copyright of `Datadog <info@datadoghq.com>`.\n\n## License\nGodspeed is released under the BSD 3-Clause License. See the `LICENSE` file for\nthe full contents of the license.\n\n## Installation\n```\ngo get -u github.com/PagerDuty/godspeed\n```\n\n## Usage\nFor more details either look at the `_example_test.go` files directly or view\nthe examples on [GoDoc](https://godoc.org/github.com/PagerDuty/godspeed#pkg-examples).\n\n### Emitting a gauge\n```Go\ng, err := godspeed.NewDefault()\n\nif err != nil {\n    // handle error\n}\n\ndefer g.Conn.Close()\n\nerr = g.Gauge(\"example.stat\", 1, nil)\n\nif err != nil {\n\t// handle error\n}\n```\n\n### Emitting an event\n```Go\n// make sure to handle the error\ng, _ := godspeed.NewDefault()\n\ndefer g.Conn.Close()\n\ntitle := \"Nginx service restart\"\ntext := \"The Nginx service has been restarted\"\n\n// the optionals are for the optional arguments available for an event\n// http://docs.datadoghq.com/guides/dogstatsd/#fields\noptionals := make(map[string]string)\noptionals[\"alert_type\"] = \"info\"\noptionals[\"source_type_name\"] = \"nginx\"\n\naddlTags := []string{\"source_type:nginx\"}\n\nerr := g.Event(title, text, 
optionals, addlTags)\n\nif err != nil {\n    fmt.Println(\"err:\", err)\n}\n```"
  },
  {
    "path": "vendor/github.com/PagerDuty/godspeed/async.go",
    "content": "// Copyright 2014-2015 PagerDuty, Inc, et al. All rights reserved.\n// Use of this source code is governed by the BSD 3-Clause\n// license that can be found in the LICENSE file.\n\npackage godspeed\n\nimport \"sync\"\n\n// AsyncGodspeed is used for asynchronous Godspeed calls.\n// The AsyncGodspeed emission methods have an additional argument\n// for a *sync.WaitGroup to have the method indicate when finished.\ntype AsyncGodspeed struct {\n\t// Godspeed is an instance of Godspeed\n\tGodspeed *Godspeed\n\n\t// W is a *sync.WaitGroup used for blocking application execution\n\t// when you want to wait for stats to be emitted.\n\t// This is here as a convenience, and you can use your own WaitGroup\n\t// in any AsyncGodspeed method calls.\n\tW *sync.WaitGroup\n}\n\n// NewAsync returns an instance of AsyncGodspeed. This is the more async-friendly version of Godspeed\n// autoTruncate dictactes whether long stats emissions get auto-truncated or dropped. Unfortunately,\n// Events will always be dropped. 
If you need monitor your events, you can access the Godspeed instance\n// directly.\nfunc NewAsync(host string, port int, autoTruncate bool) (a *AsyncGodspeed, err error) {\n\tgs, err := New(host, port, autoTruncate)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ta = &AsyncGodspeed{\n\t\tGodspeed: gs,\n\t\tW:        new(sync.WaitGroup),\n\t}\n\n\treturn\n}\n\n// NewDefaultAsync is just like NewAsync except it uses the DefaultHost and DefaultPort\nfunc NewDefaultAsync() (a *AsyncGodspeed, err error) {\n\ta, err = NewAsync(DefaultHost, DefaultPort, false)\n\treturn\n}\n\n// AddTag is identical to that within the Godspeed client\nfunc (a *AsyncGodspeed) AddTag(tag string) []string {\n\treturn a.Godspeed.AddTag(tag)\n}\n\n// AddTags is identical to that within the Godspeed client\nfunc (a *AsyncGodspeed) AddTags(tags []string) []string {\n\treturn a.Godspeed.AddTags(tags)\n}\n\n// SetNamespace is identical to that within the Godspeed client\nfunc (a *AsyncGodspeed) SetNamespace(ns string) {\n\ta.Godspeed.SetNamespace(ns)\n}\n\n// Event is almost identical to that within the Godspeed client\n// The only chnage is that it has no return value, and takes a\n// (sync.WaitGroup) argument\nfunc (a *AsyncGodspeed) Event(title, body string, keys map[string]string, tags []string, y *sync.WaitGroup) {\n\tdefer y.Done()\n\n\ta.Godspeed.Event(title, body, keys, tags)\n}\n\n// Send is almost identical to that within the Godspeed client\n// with the addition of an argument and removal of the return value\nfunc (a *AsyncGodspeed) Send(stat, kind string, delta, sampleRate float64, tags []string, y *sync.WaitGroup) {\n\tdefer y.Done()\n\n\ta.Godspeed.Send(stat, kind, delta, sampleRate, tags)\n}\n\n// ServiceCheck is almost identical to that within the Godspeed client\n// with the addition of an argument and removal of the return value\nfunc (a *AsyncGodspeed) ServiceCheck(name string, status int, fields map[string]string, tags []string, y *sync.WaitGroup) {\n\tif y != nil 
{\n\t\tdefer y.Done()\n\t}\n\n\ta.Godspeed.ServiceCheck(name, status, fields, tags)\n}\n\n// Count is almost identical to that within the Godspeed client\n// As with the other AsyncGodpseed functions it omits a return value and\n// takes a *sync.WaitGroup instance\nfunc (a *AsyncGodspeed) Count(stat string, count float64, tags []string, y *sync.WaitGroup) {\n\tdefer y.Done()\n\n\ta.Godspeed.Count(stat, count, tags)\n}\n\n// Incr is almost identical to that within the Godspeed client,\n// except it has no return value and takes a *sync.WaitGroup argument.\nfunc (a *AsyncGodspeed) Incr(stat string, tags []string, y *sync.WaitGroup) {\n\tdefer y.Done()\n\n\ta.Godspeed.Incr(stat, tags)\n}\n\n// Decr is almost identical to that within the Godspeed client. It has\n// no return value and takes a *sync.WaitGroup argument.\n//\n// Also, I've gotten tired of typing \"Xxx is almost identical to that within...\" so congrats\n// on making it this far in to the docs.\nfunc (a *AsyncGodspeed) Decr(stat string, tags []string, y *sync.WaitGroup) {\n\tdefer y.Done()\n\n\ta.Godspeed.Decr(stat, tags)\n}\n\n// Gauge is almost identical to that within the Godspeed client.\n// Here it has no return value, and takes a *sync.WaitGroup argument\nfunc (a *AsyncGodspeed) Gauge(stat string, value float64, tags []string, y *sync.WaitGroup) {\n\tdefer y.Done()\n\n\ta.Godspeed.Gauge(stat, value, tags)\n}\n\n// Histogram is almost identical to that within the Godspeed client.\n// Within AsyncGodspeed it has no return value, and also takes a *sync.WaitGroup argument\nfunc (a *AsyncGodspeed) Histogram(stat string, value float64, tags []string, y *sync.WaitGroup) {\n\tdefer y.Done()\n\n\ta.Godspeed.Histogram(stat, value, tags)\n}\n\n// Timing is almost identical to that within the Godspeed client.\n// The return value is removed, and it takes a *sync.WaitGroup argument here\nfunc (a *AsyncGodspeed) Timing(stat string, value float64, tags []string, y *sync.WaitGroup) {\n\tdefer 
y.Done()\n\n\ta.Godspeed.Timing(stat, value, tags)\n}\n\n// Set is almost identical to that within the Godspeed client\nfunc (a *AsyncGodspeed) Set(stat string, value float64, tags []string, y *sync.WaitGroup) {\n\tdefer y.Done()\n\n\ta.Godspeed.Set(stat, value, tags)\n}\n"
  },
  {
    "path": "vendor/github.com/PagerDuty/godspeed/events.go",
    "content": "// Copyright 2014-2015 PagerDuty, Inc, et al. All rights reserved.\n// Use of this source code is governed by the BSD 3-Clause\n// license that can be found in the LICENSE file.\n\npackage godspeed\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n)\n\nvar eventKeys = []string{\"date_happened\", \"hostname\", \"aggregation_key\", \"priority\", \"source_type_name\", \"alert_type\"}\nvar eventMarkers = []rune{'d', 'h', 'k', 'p', 's', 't'}\n\nfunc escapeEvent(s string) string {\n\treturn strings.NewReplacer(\"\\n\", \"\\\\n\").Replace(s)\n}\n\nfunc removePipes(s string) string {\n\treturn strings.Replace(s, \"|\", \"\", -1)\n}\n\n// Event is the function for submitting a Datadog event.\n// This is a Datadog-specific emission and most likely will not work on other statsd implementations.\n// title and body are both strings, and are the title and body of the event respectively.\n// field can be used to send the optional keys.\nfunc (g *Godspeed) Event(title, text string, fields map[string]string, tags []string) error {\n\tif len(title) < 1 {\n\t\treturn fmt.Errorf(\"title must have at least one character\")\n\t}\n\n\tif len(text) < 1 {\n\t\treturn fmt.Errorf(\"body must have at least one character\")\n\t}\n\n\tvar buf bytes.Buffer\n\n\ttitle = escapeEvent(title)\n\ttext = escapeEvent(text)\n\n\tbuf.WriteString(fmt.Sprintf(\"_e{%d,%d}:%v|%v\", len(title), len(text), title, text))\n\n\t// if some fields were passed in convert them to their proper format\n\t// and write that to the buffer\n\tif len(fields) > 0 {\n\t\tfor i, v := range eventKeys {\n\t\t\tif mv, ok := fields[v]; ok {\n\t\t\t\tbuf.WriteString(fmt.Sprintf(\"|%v:%v\", string(eventMarkers[i]), removePipes(mv)))\n\t\t\t}\n\t\t}\n\t}\n\n\ttags = uniqueTags(append(g.Tags, tags...))\n\n\tif len(tags) > 0 {\n\t\tfor i, v := range tags {\n\t\t\ttags[i] = strings.Replace(v, \"|\", \"\", -1)\n\t\t}\n\n\t\tbuf.WriteString(fmt.Sprintf(\"|#%v\", strings.Join(tags, \",\")))\n\t}\n\n\t// this handles the 
logic for truncation\n\t// if the buffer length is larger than the max, return an error\n\t// else just write it\n\tif bufLen := buf.Len(); bufLen > MaxBytes {\n\t\treturn fmt.Errorf(\"error sending %v, packet larger than %d (%d)\", string(title), MaxBytes, buf.Len())\n\t}\n\n\t_, err := g.Conn.Write(buf.Bytes())\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/PagerDuty/godspeed/godspeed.go",
    "content": "// Copyright 2014-2015 PagerDuty, Inc, et al. All rights reserved.\n// Use of this source code is governed by the BSD 3-Clause\n// license that can be found in the LICENSE file.\n\n// Package godspeed is a statsd client for the Datadog extension of statsd\n// called DogStatsD. It can be used to emit statsd stats, Datadog-specific\n// events, and DogStatsD service checks. This client also has the ability to\n// tag all outgoing statsd metrics. Godspeed is meant for synchronous calls,\n// while AsyncGodspeed is used for what it says on the tin.\n//\n// The name godspeed is a bit of a rhyming slang twist on DogStatsD. It's\n// also a poke at the fact that the statsd protocol's transport mechanism\n// is UDP.\n//\n// DogStatsD is a copyright of Datadog <info@datadoghq.com>\npackage godspeed\n\nimport (\n\t\"fmt\"\n\t\"net\"\n)\n\nconst (\n\t// DefaultHost is 127.0.0.1 (localhost)\n\tDefaultHost = \"127.0.0.1\"\n\n\t// DefaultPort is 8125\n\tDefaultPort = 8125\n\n\t// MaxBytes is the largest UDP datagram we will try to send\n\tMaxBytes = 8192\n)\n\n// Godspeed is an unbuffered Statsd client with compatibility geared towards the Datadog statsd format\n// It consists of Conn (*net.UDPConn) object for sending metrics over UDP,\n// Namespace (string) for namespacing metrics, and Tags ([]string) for tags to send with stats\ntype Godspeed struct {\n\t// Conn is the UDP connection used for sending the statsd emissions\n\tConn *net.UDPConn\n\n\t// Namespace is the namespace all stats emissions are prefixed with:\n\t// <namespace>.<statname>\n\tNamespace string\n\n\t// Tags is the slice of tags to append to each stat emission\n\tTags []string\n\n\t// AutoTruncate specifies whether or not we will try to truncate a stat\n\t// before emitting it or just return an error. This is most helpful when\n\t// using AsyncGodspeed. However, it can result in invalid stat being emitted\n\t// due to the body being truncated. 
Meant for when a single emission would\n\t// be greater than 8192 bytes.\n\tAutoTruncate bool\n}\n\n// New returns a new instance of a Godspeed statsd client.\n// This method takes the host as a string, and port as an int.\n// There is also the ability for autoTruncate. If your metric is longer than MaxBytes\n// autoTruncate can be used to truncate the message instead of erroring. This doesn't work\n// on events and will always return an error.\nfunc New(host string, port int, autoTruncate bool) (g *Godspeed, err error) {\n\t// build a new UDP dialer\n\taddr, err := net.ResolveUDPAddr(\"udp\", fmt.Sprintf(\"%s:%d\", host, port))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc, err := net.DialUDP(\"udp\", nil, addr)\n\n\t// if it failed return a pointer to an empty Godspeed struct, and the error\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// build a new Godspeed struct with the UDPConn\n\tg = &Godspeed{\n\t\tConn:         c,\n\t\tTags:         make([]string, 0),\n\t\tAutoTruncate: autoTruncate,\n\t}\n\n\treturn\n}\n\n// NewDefault is the same as New() except it uses DefaultHost and DefaultPort for the connection.\nfunc NewDefault() (g *Godspeed, err error) {\n\tg, err = New(DefaultHost, DefaultPort, false)\n\treturn\n}\n\n// AddTag allows you to add a tag for all future emitted stats.\n// It takes the tag as a string, and returns a []string containing all Godspeed tags\nfunc (g *Godspeed) AddTag(tag string) []string {\n\t// return early if the tag already exists\n\tfor _, v := range g.Tags {\n\t\tif tag == v {\n\t\t\treturn g.Tags\n\t\t}\n\t}\n\n\t// add the tag\n\tg.Tags = append(g.Tags, tag)\n\n\treturn g.Tags\n}\n\n// AddTags is like AddTag(), except it tages a []string and adds each contained string\n// This also returns a []string containing the current tags\nfunc (g *Godspeed) AddTags(tags []string) []string {\n\t// if we already have tags add each tag one at a time\n\t// otherwise unique the list and assign it directly\n\tif len(g.Tags) > 0 
{\n\t\tfor _, tag := range tags {\n\t\t\tg.AddTag(tag)\n\t\t}\n\t} else {\n\t\tg.Tags = uniqueTags(tags)\n\t}\n\n\treturn g.Tags\n}\n\n// SetNamespace allows you to prefix all of your metrics with a certain namespace\nfunc (g *Godspeed) SetNamespace(ns string) {\n\tg.Namespace = trimReserved(ns)\n}\n"
  },
  {
    "path": "vendor/github.com/PagerDuty/godspeed/service_checks.go",
    "content": "// Copyright 2014-2015 PagerDuty, Inc, et al. All rights reserved.\n// Use of this source code is governed by the BSD 3-Clause\n// license that can be found in the LICENSE file.\n\npackage godspeed\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n)\n\nvar scKeys = []string{\"service_check_message\", \"timestamp\", \"hostname\"}\nvar scMark = []string{\"m\", \"d\", \"h\"}\n\n// ServiceCheck is a function to emit DogStatsD service checks\n// to the local DD agent. It takes the name of the service,\n// which must NOT contain a pipe (|) character, and the numeric\n// status for the service. The status values are the same as Nagios:\n//\n// OK = 0, WARNING = 1, CRITICAL = 2, UNKNOWN = 3\n//\n// This functionality is an extension to the statsd\n// protocol by Datadog (DogStatsD):\n//\n// http://docs.datadoghq.com/guides/dogstatsd/#service-checks\nfunc (g *Godspeed) ServiceCheck(name string, status int, fields map[string]string, tags []string) error {\n\tif len(name) == 0 {\n\t\treturn fmt.Errorf(\"service name must have at least one character\")\n\t}\n\n\tif status < 0 || status > 3 {\n\t\treturn fmt.Errorf(\"unknown service status (%d); known values: 0,1,2,3\", status)\n\t}\n\n\tif strings.ContainsAny(\"|\", name) {\n\t\treturn fmt.Errorf(\"service name '%s' may not include pipe character ('|')\", name)\n\t}\n\n\tvar buf bytes.Buffer\n\n\tbuf.WriteString(fmt.Sprintf(\"_sc|%s|%d\", name, status))\n\n\tif len(fields) > 0 {\n\t\tfor i, v := range scKeys {\n\t\t\tif mv, ok := fields[v]; ok {\n\t\t\t\tbuf.WriteString(fmt.Sprintf(\"|%s:%s\", scMark[i], removePipes(mv)))\n\t\t\t}\n\t\t}\n\t}\n\n\ttags = uniqueTags(append(g.Tags, tags...))\n\n\tif len(tags) > 0 {\n\t\tfor i, v := range tags {\n\t\t\ttags[i] = strings.Replace(v, \"|\", \"\", -1)\n\t\t}\n\t\tbuf.WriteString(fmt.Sprintf(\"|#%s\", strings.Join(tags, \",\")))\n\t}\n\n\tif bufLen := buf.Len(); bufLen > MaxBytes {\n\t\treturn fmt.Errorf(\"error sending %s service check, packet larger than %d (%d)\", 
name, MaxBytes, bufLen)\n\t}\n\n\t_, err := g.Conn.Write(buf.Bytes())\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/PagerDuty/godspeed/shared.go",
    "content": "// Copyright 2014-2015 PagerDuty, Inc, et al. All rights reserved.\n// Use of this source code is governed by the BSD 3-Clause\n// license that can be found in the LICENSE file.\n\npackage godspeed\n\nimport \"strings\"\n\n// stats names can't include :, |, or @\nfunc trimReserved(s string) string {\n\treturn strings.NewReplacer(\":\", \"_\", \"|\", \"_\", \"@\", \"_\").Replace(s)\n}\n\n// function to make sure tags are unique\nfunc uniqueTags(t []string) []string {\n\t// if the tag slice is empty avoid allocation\n\tif len(t) < 1 {\n\t\treturn nil\n\t}\n\n\t// build a map to track which values we've seen\n\ts := make(map[string]bool)\n\n\t// loop over each string provided\n\t// if the value is not in the map then replace\n\t// the value at t[len(s)] so that we always have\n\t// only unique tags at the beginning of the slice\n\tfor i, v := range t {\n\t\tif _, x := s[v]; !x {\n\t\t\t// only change the value if needed\n\t\t\tif i != len(s) {\n\t\t\t\tt[len(s)] = v\n\t\t\t}\n\n\t\t\ts[v] = true\n\t\t}\n\t}\n\n\t// based on the size of the map we know\n\t// how many unique tags there were\n\t// so return that slice\n\treturn []string(t[:len(s)])\n}\n"
  },
  {
    "path": "vendor/github.com/PagerDuty/godspeed/stats.go",
    "content": "// Copyright 2014-2015 PagerDuty, Inc, et al. All rights reserved.\n// Use of this source code is governed by the BSD 3-Clause\n// license that can be found in the LICENSE file.\n\npackage godspeed\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// Send is the function for emitting the metrics to statsd\n// It takes the name of the stat as a string, as well as the kind.\n// The kind is \"g\" for gauge, \"c\" for count, \"ms\" for timing, etc.\n// This returns any error hit during the flushing of the stat\nfunc (g *Godspeed) Send(stat, kind string, delta, sampleRate float64, tags []string) (err error) {\n\t// if the connection hasn't been set up yet\n\tif g.Conn == nil {\n\t\treturn fmt.Errorf(\"socket not created\")\n\t}\n\n\t// return if the sample rate is less than 1 and the random number is less than the sample rate\n\tif sampleRate < 1 && rand.Float64() >= sampleRate {\n\t\treturn nil\n\t}\n\n\tvar buffer bytes.Buffer\n\n\t// if we have a namespace write it to the byte buffer\n\tif len(g.Namespace) > 0 {\n\t\tbuffer.WriteString(fmt.Sprintf(\"%v.\", g.Namespace))\n\t}\n\n\tfloatStr := strconv.FormatFloat(delta, 'f', -1, 64)\n\n\t// write the name of the metric to the byte buffer as well as the metric itself\n\tbuffer.WriteString(fmt.Sprintf(\"%v:%v|%v\", string(trimReserved(stat)), floatStr, kind))\n\n\t// if the sample rate is less than 1 add it too\n\tif sampleRate < 1 {\n\t\tfloatStr = strconv.FormatFloat(sampleRate, 'f', -1, 64)\n\t\tbuffer.WriteString(fmt.Sprintf(\"|@%v\", floatStr))\n\t}\n\n\t// add any provided tags to the metric\n\ttags = uniqueTags(append(g.Tags, tags...))\n\tif len(tags) > 0 {\n\t\tbuffer.WriteString(fmt.Sprintf(\"|#%v\", strings.Join(tags, \",\")))\n\t}\n\n\t// this handles the logic for truncation\n\t// if the buffer length is smaller than the max, just write it\n\t// else if AutoTruncate is enabled truncate/write the bytes\n\t// else generate an error to return\n\tif 
buffer.Len() <= MaxBytes {\n\t\t_, err = g.Conn.Write(buffer.Bytes())\n\t} else if g.AutoTruncate {\n\t\t_, err = g.Conn.Write(buffer.Bytes()[0:MaxBytes])\n\t} else {\n\t\terr = fmt.Errorf(\"error sending %v, packet larger than %d (%d)\", stat, MaxBytes, buffer.Len())\n\t}\n\n\treturn\n}\n\n// Count wraps Send() and simplifies the interface for Count stats\nfunc (g *Godspeed) Count(stat string, count float64, tags []string) error {\n\treturn g.Send(stat, \"c\", count, 1, append(g.Tags, tags...))\n}\n\n// Incr wraps Send() and simplifies the interface for incrementing a counter\n// It only takes the name of the stat, and tags\nfunc (g *Godspeed) Incr(stat string, tags []string) error {\n\treturn g.Count(stat, 1, append(g.Tags, tags...))\n}\n\n// Decr wraps Send() and simplifies the interface for decrementing a counter\n// It only takes the name of the stat, and tags\nfunc (g *Godspeed) Decr(stat string, tags []string) error {\n\treturn g.Count(stat, -1, append(g.Tags, tags...))\n}\n\n// Gauge wraps Send() and simplifies the interface for Gauge stats\nfunc (g *Godspeed) Gauge(stat string, value float64, tags []string) error {\n\treturn g.Send(stat, \"g\", value, 1, append(g.Tags, tags...))\n}\n\n// Histogram wraps Send() and simplifies the interface for Histogram stats\nfunc (g *Godspeed) Histogram(stat string, value float64, tags []string) error {\n\treturn g.Send(stat, \"h\", value, 1, append(g.Tags, tags...))\n}\n\n// Timing wraps Send() and simplifies the interface for Timing stats\nfunc (g *Godspeed) Timing(stat string, value float64, tags []string) error {\n\treturn g.Send(stat, \"ms\", value, 1, append(g.Tags, tags...))\n}\n\n// Set wraps Send() and simplifies the interface for Timing stats\nfunc (g *Godspeed) Set(stat string, value float64, tags []string) error {\n\treturn g.Send(stat, \"s\", value, 1, append(g.Tags, tags...))\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/.gitignore",
    "content": "*~\nh2i/h2i\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/AUTHORS",
    "content": "# This file is like Go's AUTHORS file: it lists Copyright holders.\n# The list of humans who have contributd is in the CONTRIBUTORS file.\n#\n# To contribute to this project, because it will eventually be folded\n# back in to Go itself, you need to submit a CLA:\n#\n#    http://golang.org/doc/contribute.html#copyright\n#\n# Then you get added to CONTRIBUTORS and you or your company get added\n# to the AUTHORS file.\n\nBlake Mizerany <blake.mizerany@gmail.com> github=bmizerany\nDaniel Morsing <daniel.morsing@gmail.com> github=DanielMorsing\nGabriel Aszalos <gabriel.aszalos@gmail.com> github=gbbr\nGoogle, Inc.\nKeith Rarick <kr@xph.us> github=kr\nMatthew Keenan <tank.en.mate@gmail.com> <github@mattkeenan.net> github=mattkeenan\nMatt Layher <mdlayher@gmail.com> github=mdlayher\nPerry Abbott <perry.j.abbott@gmail.com> github=pabbott0\nTatsuhiro Tsujikawa <tatsuhiro.t@gmail.com> github=tatsuhiro-t\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/CONTRIBUTORS",
    "content": "# This file is like Go's CONTRIBUTORS file: it lists humans.\n# The list of copyright holders (which may be companies) are in the AUTHORS file.\n#\n# To contribute to this project, because it will eventually be folded\n# back in to Go itself, you need to submit a CLA:\n#\n#    http://golang.org/doc/contribute.html#copyright\n#\n# Then you get added to CONTRIBUTORS and you or your company get added\n# to the AUTHORS file.\n\nBlake Mizerany <blake.mizerany@gmail.com> github=bmizerany\nBrad Fitzpatrick <bradfitz@golang.org> github=bradfitz\nDaniel Morsing <daniel.morsing@gmail.com> github=DanielMorsing\nGabriel Aszalos <gabriel.aszalos@gmail.com> github=gbbr\nKeith Rarick <kr@xph.us> github=kr\nMatthew Keenan <tank.en.mate@gmail.com> <github@mattkeenan.net> github=mattkeenan\nMatt Layher <mdlayher@gmail.com> github=mdlayher\nPerry Abbott <perry.j.abbott@gmail.com> github=pabbott0\nTatsuhiro Tsujikawa <tatsuhiro.t@gmail.com> github=tatsuhiro-t\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/Dockerfile",
    "content": "#\n# This Dockerfile builds a recent curl with HTTP/2 client support, using\n# a recent nghttp2 build.\n#\n# See the Makefile for how to tag it. If Docker and that image is found, the\n# Go tests use this curl binary for integration tests.\n#\n\nFROM ubuntu:trusty\n\nRUN apt-get update && \\\n    apt-get upgrade -y && \\\n    apt-get install -y git-core build-essential wget\n\nRUN apt-get install -y --no-install-recommends \\\n       autotools-dev libtool pkg-config zlib1g-dev \\\n       libcunit1-dev libssl-dev libxml2-dev libevent-dev \\\n       automake autoconf\n\n# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:\nENV NGHTTP2_VER af24f8394e43f4\nRUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git\n\nWORKDIR /root/nghttp2\nRUN git reset --hard $NGHTTP2_VER\nRUN autoreconf -i\nRUN automake\nRUN autoconf\nRUN ./configure\nRUN make\nRUN make install\n\nWORKDIR /root\nRUN wget http://curl.haxx.se/download/curl-7.40.0.tar.gz\nRUN tar -zxvf curl-7.40.0.tar.gz\nWORKDIR /root/curl-7.40.0\nRUN ./configure --with-ssl --with-nghttp2=/usr/local\nRUN make\nRUN make install\nRUN ldconfig\n\nCMD [\"-h\"]\nENTRYPOINT [\"/usr/local/bin/curl\"]\n\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/HACKING",
    "content": "We only accept contributions from users who have gone through Go's\ncontribution process (signed a CLA).\n\nPlease acknowledge whether you have (and use the same email) if\nsending a pull request.\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/LICENSE",
    "content": "Copyright 2014 Google & the Go AUTHORS\n\nGo AUTHORS are:\nSee https://code.google.com/p/go/source/browse/AUTHORS\n\nLicensed under the terms of Go itself:\nhttps://code.google.com/p/go/source/browse/LICENSE\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/Makefile",
    "content": "curlimage:\n\tdocker build -t gohttp2/curl .\n\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/README",
    "content": "This is a work-in-progress HTTP/2 implementation for Go.\n\nIt will eventually live in the Go standard library and won't require\nany changes to your code to use.  It will just be automatic.\n\nStatus:\n\n* The server support is pretty good. A few things are missing\n  but are being worked on.\n* The client work has just started but shares a lot of code\n  is coming along much quicker.\n\nDocs are at https://godoc.org/github.com/bradfitz/http2\n\nDemo test server at https://http2.golang.org/\n\nHelp & bug reports welcome.\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/buffer.go",
    "content": "// Copyright 2014 The Go Authors.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\npackage http2\n\nimport (\n\t\"errors\"\n)\n\n// buffer is an io.ReadWriteCloser backed by a fixed size buffer.\n// It never allocates, but moves old data as new data is written.\ntype buffer struct {\n\tbuf    []byte\n\tr, w   int\n\tclosed bool\n\terr    error // err to return to reader\n}\n\nvar (\n\terrReadEmpty   = errors.New(\"read from empty buffer\")\n\terrWriteClosed = errors.New(\"write on closed buffer\")\n\terrWriteFull   = errors.New(\"write on full buffer\")\n)\n\n// Read copies bytes from the buffer into p.\n// It is an error to read when no data is available.\nfunc (b *buffer) Read(p []byte) (n int, err error) {\n\tn = copy(p, b.buf[b.r:b.w])\n\tb.r += n\n\tif b.closed && b.r == b.w {\n\t\terr = b.err\n\t} else if b.r == b.w && n == 0 {\n\t\terr = errReadEmpty\n\t}\n\treturn n, err\n}\n\n// Len returns the number of bytes of the unread portion of the buffer.\nfunc (b *buffer) Len() int {\n\treturn b.w - b.r\n}\n\n// Write copies bytes from p into the buffer.\n// It is an error to write more data than the buffer can hold.\nfunc (b *buffer) Write(p []byte) (n int, err error) {\n\tif b.closed {\n\t\treturn 0, errWriteClosed\n\t}\n\n\t// Slide existing data to beginning.\n\tif b.r > 0 && len(p) > len(b.buf)-b.w {\n\t\tcopy(b.buf, b.buf[b.r:b.w])\n\t\tb.w -= b.r\n\t\tb.r = 0\n\t}\n\n\t// Write new data.\n\tn = copy(b.buf[b.w:], p)\n\tb.w += n\n\tif n < len(p) {\n\t\terr = errWriteFull\n\t}\n\treturn n, err\n}\n\n// Close marks the buffer as closed. Future calls to Write will\n// return an error. Future calls to Read, once the buffer is\n// empty, will return err.\nfunc (b *buffer) Close(err error) {\n\tif !b.closed {\n\t\tb.closed = true\n\t\tb.err = err\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/errors.go",
    "content": "// Copyright 2014 The Go Authors.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\npackage http2\n\nimport \"fmt\"\n\n// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.\ntype ErrCode uint32\n\nconst (\n\tErrCodeNo                 ErrCode = 0x0\n\tErrCodeProtocol           ErrCode = 0x1\n\tErrCodeInternal           ErrCode = 0x2\n\tErrCodeFlowControl        ErrCode = 0x3\n\tErrCodeSettingsTimeout    ErrCode = 0x4\n\tErrCodeStreamClosed       ErrCode = 0x5\n\tErrCodeFrameSize          ErrCode = 0x6\n\tErrCodeRefusedStream      ErrCode = 0x7\n\tErrCodeCancel             ErrCode = 0x8\n\tErrCodeCompression        ErrCode = 0x9\n\tErrCodeConnect            ErrCode = 0xa\n\tErrCodeEnhanceYourCalm    ErrCode = 0xb\n\tErrCodeInadequateSecurity ErrCode = 0xc\n\tErrCodeHTTP11Required     ErrCode = 0xd\n)\n\nvar errCodeName = map[ErrCode]string{\n\tErrCodeNo:                 \"NO_ERROR\",\n\tErrCodeProtocol:           \"PROTOCOL_ERROR\",\n\tErrCodeInternal:           \"INTERNAL_ERROR\",\n\tErrCodeFlowControl:        \"FLOW_CONTROL_ERROR\",\n\tErrCodeSettingsTimeout:    \"SETTINGS_TIMEOUT\",\n\tErrCodeStreamClosed:       \"STREAM_CLOSED\",\n\tErrCodeFrameSize:          \"FRAME_SIZE_ERROR\",\n\tErrCodeRefusedStream:      \"REFUSED_STREAM\",\n\tErrCodeCancel:             \"CANCEL\",\n\tErrCodeCompression:        \"COMPRESSION_ERROR\",\n\tErrCodeConnect:            \"CONNECT_ERROR\",\n\tErrCodeEnhanceYourCalm:    \"ENHANCE_YOUR_CALM\",\n\tErrCodeInadequateSecurity: \"INADEQUATE_SECURITY\",\n\tErrCodeHTTP11Required:     \"HTTP_1_1_REQUIRED\",\n}\n\nfunc (e ErrCode) String() string {\n\tif s, ok := errCodeName[e]; ok {\n\t\treturn s\n\t}\n\treturn fmt.Sprintf(\"unknown error code 0x%x\", uint32(e))\n}\n\n// ConnectionError is an error that results in the termination of the\n// entire connection.\ntype ConnectionError 
ErrCode\n\nfunc (e ConnectionError) Error() string { return fmt.Sprintf(\"connection error: %s\", ErrCode(e)) }\n\n// StreamError is an error that only affects one stream within an\n// HTTP/2 connection.\ntype StreamError struct {\n\tStreamID uint32\n\tCode     ErrCode\n}\n\nfunc (e StreamError) Error() string {\n\treturn fmt.Sprintf(\"stream error: stream ID %d; %v\", e.StreamID, e.Code)\n}\n\n// 6.9.1 The Flow Control Window\n// \"If a sender receives a WINDOW_UPDATE that causes a flow control\n// window to exceed this maximum it MUST terminate either the stream\n// or the connection, as appropriate. For streams, [...]; for the\n// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code.\"\ntype goAwayFlowError struct{}\n\nfunc (goAwayFlowError) Error() string { return \"connection exceeded flow control window size\" }\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/flow.go",
    "content": "// Copyright 2014 The Go Authors.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\n// Flow control\n\npackage http2\n\n// flow is the flow control window's size.\ntype flow struct {\n\t// n is the number of DATA bytes we're allowed to send.\n\t// A flow is kept both on a conn and a per-stream.\n\tn int32\n\n\t// conn points to the shared connection-level flow that is\n\t// shared by all streams on that conn. It is nil for the flow\n\t// that's on the conn directly.\n\tconn *flow\n}\n\nfunc (f *flow) setConnFlow(cf *flow) { f.conn = cf }\n\nfunc (f *flow) available() int32 {\n\tn := f.n\n\tif f.conn != nil && f.conn.n < n {\n\t\tn = f.conn.n\n\t}\n\treturn n\n}\n\nfunc (f *flow) take(n int32) {\n\tif n > f.available() {\n\t\tpanic(\"internal error: took too much\")\n\t}\n\tf.n -= n\n\tif f.conn != nil {\n\t\tf.conn.n -= n\n\t}\n}\n\n// add adds n bytes (positive or negative) to the flow control window.\n// It returns false if the sum would exceed 2^31-1.\nfunc (f *flow) add(n int32) bool {\n\tremain := (1<<31 - 1) - f.n\n\tif n > remain {\n\t\treturn false\n\t}\n\tf.n += n\n\treturn true\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/frame.go",
    "content": "// Copyright 2014 The Go Authors.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\npackage http2\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n)\n\nconst frameHeaderLen = 9\n\nvar padZeros = make([]byte, 255) // zeros for padding\n\n// A FrameType is a registered frame type as defined in\n// http://http2.github.io/http2-spec/#rfc.section.11.2\ntype FrameType uint8\n\nconst (\n\tFrameData         FrameType = 0x0\n\tFrameHeaders      FrameType = 0x1\n\tFramePriority     FrameType = 0x2\n\tFrameRSTStream    FrameType = 0x3\n\tFrameSettings     FrameType = 0x4\n\tFramePushPromise  FrameType = 0x5\n\tFramePing         FrameType = 0x6\n\tFrameGoAway       FrameType = 0x7\n\tFrameWindowUpdate FrameType = 0x8\n\tFrameContinuation FrameType = 0x9\n)\n\nvar frameName = map[FrameType]string{\n\tFrameData:         \"DATA\",\n\tFrameHeaders:      \"HEADERS\",\n\tFramePriority:     \"PRIORITY\",\n\tFrameRSTStream:    \"RST_STREAM\",\n\tFrameSettings:     \"SETTINGS\",\n\tFramePushPromise:  \"PUSH_PROMISE\",\n\tFramePing:         \"PING\",\n\tFrameGoAway:       \"GOAWAY\",\n\tFrameWindowUpdate: \"WINDOW_UPDATE\",\n\tFrameContinuation: \"CONTINUATION\",\n}\n\nfunc (t FrameType) String() string {\n\tif s, ok := frameName[t]; ok {\n\t\treturn s\n\t}\n\treturn fmt.Sprintf(\"UNKNOWN_FRAME_TYPE_%d\", uint8(t))\n}\n\n// Flags is a bitmask of HTTP/2 flags.\n// The meaning of flags varies depending on the frame type.\ntype Flags uint8\n\n// Has reports whether f contains all (0 or more) flags in v.\nfunc (f Flags) Has(v Flags) bool {\n\treturn (f & v) == v\n}\n\n// Frame-specific FrameHeader flag bits.\nconst (\n\t// Data Frame\n\tFlagDataEndStream Flags = 0x1\n\tFlagDataPadded    Flags = 0x8\n\n\t// Headers Frame\n\tFlagHeadersEndStream  Flags = 0x1\n\tFlagHeadersEndHeaders Flags = 
0x4\n\tFlagHeadersPadded     Flags = 0x8\n\tFlagHeadersPriority   Flags = 0x20\n\n\t// Settings Frame\n\tFlagSettingsAck Flags = 0x1\n\n\t// Ping Frame\n\tFlagPingAck Flags = 0x1\n\n\t// Continuation Frame\n\tFlagContinuationEndHeaders Flags = 0x4\n\n\tFlagPushPromiseEndHeaders Flags = 0x4\n\tFlagPushPromisePadded     Flags = 0x8\n)\n\nvar flagName = map[FrameType]map[Flags]string{\n\tFrameData: {\n\t\tFlagDataEndStream: \"END_STREAM\",\n\t\tFlagDataPadded:    \"PADDED\",\n\t},\n\tFrameHeaders: {\n\t\tFlagHeadersEndStream:  \"END_STREAM\",\n\t\tFlagHeadersEndHeaders: \"END_HEADERS\",\n\t\tFlagHeadersPadded:     \"PADDED\",\n\t\tFlagHeadersPriority:   \"PRIORITY\",\n\t},\n\tFrameSettings: {\n\t\tFlagSettingsAck: \"ACK\",\n\t},\n\tFramePing: {\n\t\tFlagPingAck: \"ACK\",\n\t},\n\tFrameContinuation: {\n\t\tFlagContinuationEndHeaders: \"END_HEADERS\",\n\t},\n\tFramePushPromise: {\n\t\tFlagPushPromiseEndHeaders: \"END_HEADERS\",\n\t\tFlagPushPromisePadded:     \"PADDED\",\n\t},\n}\n\n// a frameParser parses a frame given its FrameHeader and payload\n// bytes. 
The length of payload will always equal fh.Length (which\n// might be 0).\ntype frameParser func(fh FrameHeader, payload []byte) (Frame, error)\n\nvar frameParsers = map[FrameType]frameParser{\n\tFrameData:         parseDataFrame,\n\tFrameHeaders:      parseHeadersFrame,\n\tFramePriority:     parsePriorityFrame,\n\tFrameRSTStream:    parseRSTStreamFrame,\n\tFrameSettings:     parseSettingsFrame,\n\tFramePushPromise:  parsePushPromise,\n\tFramePing:         parsePingFrame,\n\tFrameGoAway:       parseGoAwayFrame,\n\tFrameWindowUpdate: parseWindowUpdateFrame,\n\tFrameContinuation: parseContinuationFrame,\n}\n\nfunc typeFrameParser(t FrameType) frameParser {\n\tif f := frameParsers[t]; f != nil {\n\t\treturn f\n\t}\n\treturn parseUnknownFrame\n}\n\n// A FrameHeader is the 9 byte header of all HTTP/2 frames.\n//\n// See http://http2.github.io/http2-spec/#FrameHeader\ntype FrameHeader struct {\n\tvalid bool // caller can access []byte fields in the Frame\n\n\t// Type is the 1 byte frame type. There are ten standard frame\n\t// types, but extension frame types may be written by WriteRawFrame\n\t// and will be returned by ReadFrame (as UnknownFrame).\n\tType FrameType\n\n\t// Flags are the 1 byte of 8 potential bit flags per frame.\n\t// They are specific to the frame type.\n\tFlags Flags\n\n\t// Length is the length of the frame, not including the 9 byte header.\n\t// The maximum size is one byte less than 16MB (uint24), but only\n\t// frames up to 16KB are allowed without peer agreement.\n\tLength uint32\n\n\t// StreamID is which stream this frame is for. Certain frames\n\t// are not stream-specific, in which case this field is 0.\n\tStreamID uint32\n}\n\n// Header returns h. 
It exists so FrameHeaders can be embedded in other\n// specific frame types and implement the Frame interface.\nfunc (h FrameHeader) Header() FrameHeader { return h }\n\nfunc (h FrameHeader) String() string {\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"[FrameHeader \")\n\tbuf.WriteString(h.Type.String())\n\tif h.Flags != 0 {\n\t\tbuf.WriteString(\" flags=\")\n\t\tset := 0\n\t\tfor i := uint8(0); i < 8; i++ {\n\t\t\tif h.Flags&(1<<i) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tset++\n\t\t\tif set > 1 {\n\t\t\t\tbuf.WriteByte('|')\n\t\t\t}\n\t\t\tname := flagName[h.Type][Flags(1<<i)]\n\t\t\tif name != \"\" {\n\t\t\t\tbuf.WriteString(name)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(&buf, \"0x%x\", 1<<i)\n\t\t\t}\n\t\t}\n\t}\n\tif h.StreamID != 0 {\n\t\tfmt.Fprintf(&buf, \" stream=%d\", h.StreamID)\n\t}\n\tfmt.Fprintf(&buf, \" len=%d]\", h.Length)\n\treturn buf.String()\n}\n\nfunc (h *FrameHeader) checkValid() {\n\tif !h.valid {\n\t\tpanic(\"Frame accessor called on non-owned Frame\")\n\t}\n}\n\nfunc (h *FrameHeader) invalidate() { h.valid = false }\n\n// frame header bytes.\n// Used only by ReadFrameHeader.\nvar fhBytes = sync.Pool{\n\tNew: func() interface{} {\n\t\tbuf := make([]byte, frameHeaderLen)\n\t\treturn &buf\n\t},\n}\n\n// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.\n// Most users should use Framer.ReadFrame instead.\nfunc ReadFrameHeader(r io.Reader) (FrameHeader, error) {\n\tbufp := fhBytes.Get().(*[]byte)\n\tdefer fhBytes.Put(bufp)\n\treturn readFrameHeader(*bufp, r)\n}\n\nfunc readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) {\n\t_, err := io.ReadFull(r, buf[:frameHeaderLen])\n\tif err != nil {\n\t\treturn FrameHeader{}, err\n\t}\n\treturn FrameHeader{\n\t\tLength:   (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),\n\t\tType:     FrameType(buf[3]),\n\t\tFlags:    Flags(buf[4]),\n\t\tStreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),\n\t\tvalid:    true,\n\t}, nil\n}\n\n// A Frame is the base interface implemented 
by all frame types.\n// Callers will generally type-assert the specific frame type:\n// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.\n//\n// Frames are only valid until the next call to Framer.ReadFrame.\ntype Frame interface {\n\tHeader() FrameHeader\n\n\t// invalidate is called by Framer.ReadFrame to make this\n\t// frame's buffers as being invalid, since the subsequent\n\t// frame will reuse them.\n\tinvalidate()\n}\n\n// A Framer reads and writes Frames.\ntype Framer struct {\n\tr         io.Reader\n\tlastFrame Frame\n\n\tmaxReadSize uint32\n\theaderBuf   [frameHeaderLen]byte\n\n\t// TODO: let getReadBuf be configurable, and use a less memory-pinning\n\t// allocator in server.go to minimize memory pinned for many idle conns.\n\t// Will probably also need to make frame invalidation have a hook too.\n\tgetReadBuf func(size uint32) []byte\n\treadBuf    []byte // cache for default getReadBuf\n\n\tmaxWriteSize uint32 // zero means unlimited; TODO: implement\n\n\tw    io.Writer\n\twbuf []byte\n\n\t// AllowIllegalWrites permits the Framer's Write methods to\n\t// write frames that do not conform to the HTTP/2 spec.  This\n\t// permits using the Framer to test other HTTP/2\n\t// implementations' conformance to the spec.\n\t// If false, the Write methods will prefer to return an error\n\t// rather than comply.\n\tAllowIllegalWrites bool\n\n\t// TODO: track which type of frame & with which flags was sent\n\t// last.  
Then return an error (unless AllowIllegalWrites) if\n\t// we're in the middle of a header block and a\n\t// non-Continuation or Continuation on a different stream is\n\t// attempted to be written.\n}\n\nfunc (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {\n\t// Write the FrameHeader.\n\tf.wbuf = append(f.wbuf[:0],\n\t\t0, // 3 bytes of length, filled in in endWrite\n\t\t0,\n\t\t0,\n\t\tbyte(ftype),\n\t\tbyte(flags),\n\t\tbyte(streamID>>24),\n\t\tbyte(streamID>>16),\n\t\tbyte(streamID>>8),\n\t\tbyte(streamID))\n}\n\nfunc (f *Framer) endWrite() error {\n\t// Now that we know the final size, fill in the FrameHeader in\n\t// the space previously reserved for it. Abuse append.\n\tlength := len(f.wbuf) - frameHeaderLen\n\tif length >= (1 << 24) {\n\t\treturn ErrFrameTooLarge\n\t}\n\t_ = append(f.wbuf[:0],\n\t\tbyte(length>>16),\n\t\tbyte(length>>8),\n\t\tbyte(length))\n\tn, err := f.w.Write(f.wbuf)\n\tif err == nil && n != len(f.wbuf) {\n\t\terr = io.ErrShortWrite\n\t}\n\treturn err\n}\n\nfunc (f *Framer) writeByte(v byte)     { f.wbuf = append(f.wbuf, v) }\nfunc (f *Framer) writeBytes(v []byte)  { f.wbuf = append(f.wbuf, v...) 
}\nfunc (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }\nfunc (f *Framer) writeUint32(v uint32) {\n\tf.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))\n}\n\nconst (\n\tminMaxFrameSize = 1 << 14\n\tmaxFrameSize    = 1<<24 - 1\n)\n\n// NewFramer returns a Framer that writes frames to w and reads them from r.\nfunc NewFramer(w io.Writer, r io.Reader) *Framer {\n\tfr := &Framer{\n\t\tw: w,\n\t\tr: r,\n\t}\n\tfr.getReadBuf = func(size uint32) []byte {\n\t\tif cap(fr.readBuf) >= int(size) {\n\t\t\treturn fr.readBuf[:size]\n\t\t}\n\t\tfr.readBuf = make([]byte, size)\n\t\treturn fr.readBuf\n\t}\n\tfr.SetMaxReadFrameSize(maxFrameSize)\n\treturn fr\n}\n\n// SetMaxReadFrameSize sets the maximum size of a frame\n// that will be read by a subsequent call to ReadFrame.\n// It is the caller's responsibility to advertise this\n// limit with a SETTINGS frame.\nfunc (fr *Framer) SetMaxReadFrameSize(v uint32) {\n\tif v > maxFrameSize {\n\t\tv = maxFrameSize\n\t}\n\tfr.maxReadSize = v\n}\n\n// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer\n// sends a frame that is larger than declared with SetMaxReadFrameSize.\nvar ErrFrameTooLarge = errors.New(\"http2: frame too large\")\n\n// ReadFrame reads a single frame. 
The returned Frame is only valid\n// until the next call to ReadFrame.\n// If the frame is larger than previously set with SetMaxReadFrameSize,\n// the returned error is ErrFrameTooLarge.\nfunc (fr *Framer) ReadFrame() (Frame, error) {\n\tif fr.lastFrame != nil {\n\t\tfr.lastFrame.invalidate()\n\t}\n\tfh, err := readFrameHeader(fr.headerBuf[:], fr.r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif fh.Length > fr.maxReadSize {\n\t\treturn nil, ErrFrameTooLarge\n\t}\n\tpayload := fr.getReadBuf(fh.Length)\n\tif _, err := io.ReadFull(fr.r, payload); err != nil {\n\t\treturn nil, err\n\t}\n\tf, err := typeFrameParser(fh.Type)(fh, payload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfr.lastFrame = f\n\treturn f, nil\n}\n\n// A DataFrame conveys arbitrary, variable-length sequences of octets\n// associated with a stream.\n// See http://http2.github.io/http2-spec/#rfc.section.6.1\ntype DataFrame struct {\n\tFrameHeader\n\tdata []byte\n}\n\nfunc (f *DataFrame) StreamEnded() bool {\n\treturn f.FrameHeader.Flags.Has(FlagDataEndStream)\n}\n\n// Data returns the frame's data octets, not including any padding\n// size byte or padding suffix bytes.\n// The caller must not retain the returned memory past the next\n// call to ReadFrame.\nfunc (f *DataFrame) Data() []byte {\n\tf.checkValid()\n\treturn f.data\n}\n\nfunc parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {\n\tif fh.StreamID == 0 {\n\t\t// DATA frames MUST be associated with a stream. 
If a\n\t\t// DATA frame is received whose stream identifier\n\t\t// field is 0x0, the recipient MUST respond with a\n\t\t// connection error (Section 5.4.1) of type\n\t\t// PROTOCOL_ERROR.\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\tf := &DataFrame{\n\t\tFrameHeader: fh,\n\t}\n\tvar padSize byte\n\tif fh.Flags.Has(FlagDataPadded) {\n\t\tvar err error\n\t\tpayload, padSize, err = readByte(payload)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif int(padSize) > len(payload) {\n\t\t// If the length of the padding is greater than the\n\t\t// length of the frame payload, the recipient MUST\n\t\t// treat this as a connection error.\n\t\t// Filed: https://github.com/http2/http2-spec/issues/610\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\tf.data = payload[:len(payload)-int(padSize)]\n\treturn f, nil\n}\n\nvar errStreamID = errors.New(\"invalid streamid\")\n\nfunc validStreamID(streamID uint32) bool {\n\treturn streamID != 0 && streamID&(1<<31) == 0\n}\n\n// WriteData writes a DATA frame.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility to not call other Write methods concurrently.\nfunc (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {\n\t// TODO: ignoring padding for now. 
will add when somebody cares.\n\tif !validStreamID(streamID) && !f.AllowIllegalWrites {\n\t\treturn errStreamID\n\t}\n\tvar flags Flags\n\tif endStream {\n\t\tflags |= FlagDataEndStream\n\t}\n\tf.startWrite(FrameData, flags, streamID)\n\tf.wbuf = append(f.wbuf, data...)\n\treturn f.endWrite()\n}\n\n// A SettingsFrame conveys configuration parameters that affect how\n// endpoints communicate, such as preferences and constraints on peer\n// behavior.\n//\n// See http://http2.github.io/http2-spec/#SETTINGS\ntype SettingsFrame struct {\n\tFrameHeader\n\tp []byte\n}\n\nfunc parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {\n\tif fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {\n\t\t// When this (ACK 0x1) bit is set, the payload of the\n\t\t// SETTINGS frame MUST be empty.  Receipt of a\n\t\t// SETTINGS frame with the ACK flag set and a length\n\t\t// field value other than 0 MUST be treated as a\n\t\t// connection error (Section 5.4.1) of type\n\t\t// FRAME_SIZE_ERROR.\n\t\treturn nil, ConnectionError(ErrCodeFrameSize)\n\t}\n\tif fh.StreamID != 0 {\n\t\t// SETTINGS frames always apply to a connection,\n\t\t// never a single stream.  The stream identifier for a\n\t\t// SETTINGS frame MUST be zero (0x0).  
If an endpoint\n\t\t// receives a SETTINGS frame whose stream identifier\n\t\t// field is anything other than 0x0, the endpoint MUST\n\t\t// respond with a connection error (Section 5.4.1) of\n\t\t// type PROTOCOL_ERROR.\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\tif len(p)%6 != 0 {\n\t\t// Expecting even number of 6 byte settings.\n\t\treturn nil, ConnectionError(ErrCodeFrameSize)\n\t}\n\tf := &SettingsFrame{FrameHeader: fh, p: p}\n\tif v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {\n\t\t// Values above the maximum flow control window size of 2^31 - 1 MUST\n\t\t// be treated as a connection error (Section 5.4.1) of type\n\t\t// FLOW_CONTROL_ERROR.\n\t\treturn nil, ConnectionError(ErrCodeFlowControl)\n\t}\n\treturn f, nil\n}\n\nfunc (f *SettingsFrame) IsAck() bool {\n\treturn f.FrameHeader.Flags.Has(FlagSettingsAck)\n}\n\nfunc (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {\n\tf.checkValid()\n\tbuf := f.p\n\tfor len(buf) > 0 {\n\t\tsettingID := SettingID(binary.BigEndian.Uint16(buf[:2]))\n\t\tif settingID == s {\n\t\t\treturn binary.BigEndian.Uint32(buf[2:6]), true\n\t\t}\n\t\tbuf = buf[6:]\n\t}\n\treturn 0, false\n}\n\n// ForeachSetting runs fn for each setting.\n// It stops and returns the first error.\nfunc (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {\n\tf.checkValid()\n\tbuf := f.p\n\tfor len(buf) > 0 {\n\t\tif err := fn(Setting{\n\t\t\tSettingID(binary.BigEndian.Uint16(buf[:2])),\n\t\t\tbinary.BigEndian.Uint32(buf[2:6]),\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbuf = buf[6:]\n\t}\n\treturn nil\n}\n\n// WriteSettings writes a SETTINGS frame with zero or more settings\n// specified and the ACK bit not set.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility to not call other Write methods concurrently.\nfunc (f *Framer) WriteSettings(settings ...Setting) error {\n\tf.startWrite(FrameSettings, 0, 0)\n\tfor _, s := range settings 
{\n\t\tf.writeUint16(uint16(s.ID))\n\t\tf.writeUint32(s.Val)\n\t}\n\treturn f.endWrite()\n}\n\n// WriteSettings writes an empty SETTINGS frame with the ACK bit set.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility to not call other Write methods concurrently.\nfunc (f *Framer) WriteSettingsAck() error {\n\tf.startWrite(FrameSettings, FlagSettingsAck, 0)\n\treturn f.endWrite()\n}\n\n// A PingFrame is a mechanism for measuring a minimal round trip time\n// from the sender, as well as determining whether an idle connection\n// is still functional.\n// See http://http2.github.io/http2-spec/#rfc.section.6.7\ntype PingFrame struct {\n\tFrameHeader\n\tData [8]byte\n}\n\nfunc parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {\n\tif len(payload) != 8 {\n\t\treturn nil, ConnectionError(ErrCodeFrameSize)\n\t}\n\tif fh.StreamID != 0 {\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\tf := &PingFrame{FrameHeader: fh}\n\tcopy(f.Data[:], payload)\n\treturn f, nil\n}\n\nfunc (f *Framer) WritePing(ack bool, data [8]byte) error {\n\tvar flags Flags\n\tif ack {\n\t\tflags = FlagPingAck\n\t}\n\tf.startWrite(FramePing, flags, 0)\n\tf.writeBytes(data[:])\n\treturn f.endWrite()\n}\n\n// A GoAwayFrame informs the remote peer to stop creating streams on this connection.\n// See http://http2.github.io/http2-spec/#rfc.section.6.8\ntype GoAwayFrame struct {\n\tFrameHeader\n\tLastStreamID uint32\n\tErrCode      ErrCode\n\tdebugData    []byte\n}\n\n// DebugData returns any debug data in the GOAWAY frame. 
Its contents\n// are not defined.\n// The caller must not retain the returned memory past the next\n// call to ReadFrame.\nfunc (f *GoAwayFrame) DebugData() []byte {\n\tf.checkValid()\n\treturn f.debugData\n}\n\nfunc parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {\n\tif fh.StreamID != 0 {\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\tif len(p) < 8 {\n\t\treturn nil, ConnectionError(ErrCodeFrameSize)\n\t}\n\treturn &GoAwayFrame{\n\t\tFrameHeader:  fh,\n\t\tLastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),\n\t\tErrCode:      ErrCode(binary.BigEndian.Uint32(p[4:8])),\n\t\tdebugData:    p[8:],\n\t}, nil\n}\n\nfunc (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {\n\tf.startWrite(FrameGoAway, 0, 0)\n\tf.writeUint32(maxStreamID & (1<<31 - 1))\n\tf.writeUint32(uint32(code))\n\tf.writeBytes(debugData)\n\treturn f.endWrite()\n}\n\n// An UnknownFrame is the frame type returned when the frame type is unknown\n// or no specific frame type parser exists.\ntype UnknownFrame struct {\n\tFrameHeader\n\tp []byte\n}\n\n// Payload returns the frame's payload (after the header).  
It is not\n// valid to call this method after a subsequent call to\n// Framer.ReadFrame, nor is it valid to retain the returned slice.\n// The memory is owned by the Framer and is invalidated when the next\n// frame is read.\nfunc (f *UnknownFrame) Payload() []byte {\n\tf.checkValid()\n\treturn f.p\n}\n\nfunc parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {\n\treturn &UnknownFrame{fh, p}, nil\n}\n\n// A WindowUpdateFrame is used to implement flow control.\n// See http://http2.github.io/http2-spec/#rfc.section.6.9\ntype WindowUpdateFrame struct {\n\tFrameHeader\n\tIncrement uint32\n}\n\nfunc parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {\n\tif len(p) != 4 {\n\t\treturn nil, ConnectionError(ErrCodeFrameSize)\n\t}\n\tinc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit\n\tif inc == 0 {\n\t\t// A receiver MUST treat the receipt of a\n\t\t// WINDOW_UPDATE frame with an flow control window\n\t\t// increment of 0 as a stream error (Section 5.4.2) of\n\t\t// type PROTOCOL_ERROR; errors on the connection flow\n\t\t// control window MUST be treated as a connection\n\t\t// error (Section 5.4.1).\n\t\tif fh.StreamID == 0 {\n\t\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t\t}\n\t\treturn nil, StreamError{fh.StreamID, ErrCodeProtocol}\n\t}\n\treturn &WindowUpdateFrame{\n\t\tFrameHeader: fh,\n\t\tIncrement:   inc,\n\t}, nil\n}\n\n// WriteWindowUpdate writes a WINDOW_UPDATE frame.\n// The increment value must be between 1 and 2,147,483,647, inclusive.\n// If the Stream ID is zero, the window update applies to the\n// connection as a whole.\nfunc (f *Framer) WriteWindowUpdate(streamID, incr uint32) error {\n\t// \"The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets.\"\n\tif (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {\n\t\treturn errors.New(\"illegal window increment value\")\n\t}\n\tf.startWrite(FrameWindowUpdate, 0, 
streamID)\n\tf.writeUint32(incr)\n\treturn f.endWrite()\n}\n\n// A HeadersFrame is used to open a stream and additionally carries a\n// header block fragment.\ntype HeadersFrame struct {\n\tFrameHeader\n\n\t// Priority is set if FlagHeadersPriority is set in the FrameHeader.\n\tPriority PriorityParam\n\n\theaderFragBuf []byte // not owned\n}\n\nfunc (f *HeadersFrame) HeaderBlockFragment() []byte {\n\tf.checkValid()\n\treturn f.headerFragBuf\n}\n\nfunc (f *HeadersFrame) HeadersEnded() bool {\n\treturn f.FrameHeader.Flags.Has(FlagHeadersEndHeaders)\n}\n\nfunc (f *HeadersFrame) StreamEnded() bool {\n\treturn f.FrameHeader.Flags.Has(FlagHeadersEndStream)\n}\n\nfunc (f *HeadersFrame) HasPriority() bool {\n\treturn f.FrameHeader.Flags.Has(FlagHeadersPriority)\n}\n\nfunc parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {\n\thf := &HeadersFrame{\n\t\tFrameHeader: fh,\n\t}\n\tif fh.StreamID == 0 {\n\t\t// HEADERS frames MUST be associated with a stream.  If a HEADERS frame\n\t\t// is received whose stream identifier field is 0x0, the recipient MUST\n\t\t// respond with a connection error (Section 5.4.1) of type\n\t\t// PROTOCOL_ERROR.\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\tvar padLength uint8\n\tif fh.Flags.Has(FlagHeadersPadded) {\n\t\tif p, padLength, err = readByte(p); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif fh.Flags.Has(FlagHeadersPriority) {\n\t\tvar v uint32\n\t\tp, v, err = readUint32(p)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thf.Priority.StreamDep = v & 0x7fffffff\n\t\thf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set\n\t\tp, hf.Priority.Weight, err = readByte(p)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif len(p)-int(padLength) <= 0 {\n\t\treturn nil, StreamError{fh.StreamID, ErrCodeProtocol}\n\t}\n\thf.headerFragBuf = p[:len(p)-int(padLength)]\n\treturn hf, nil\n}\n\n// HeadersFrameParam are the parameters for writing a HEADERS frame.\ntype HeadersFrameParam struct 
{\n\t// StreamID is the required Stream ID to initiate.\n\tStreamID uint32\n\t// BlockFragment is part (or all) of a Header Block.\n\tBlockFragment []byte\n\n\t// EndStream indicates that the header block is the last that\n\t// the endpoint will send for the identified stream. Setting\n\t// this flag causes the stream to enter one of \"half closed\"\n\t// states.\n\tEndStream bool\n\n\t// EndHeaders indicates that this frame contains an entire\n\t// header block and is not followed by any\n\t// CONTINUATION frames.\n\tEndHeaders bool\n\n\t// PadLength is the optional number of bytes of zeros to add\n\t// to this frame.\n\tPadLength uint8\n\n\t// Priority, if non-zero, includes stream priority information\n\t// in the HEADER frame.\n\tPriority PriorityParam\n}\n\n// WriteHeaders writes a single HEADERS frame.\n//\n// This is a low-level header writing method. Encoding headers and\n// splitting them into any necessary CONTINUATION frames is handled\n// elsewhere.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility to not call other Write methods concurrently.\nfunc (f *Framer) WriteHeaders(p HeadersFrameParam) error {\n\tif !validStreamID(p.StreamID) && !f.AllowIllegalWrites {\n\t\treturn errStreamID\n\t}\n\tvar flags Flags\n\tif p.PadLength != 0 {\n\t\tflags |= FlagHeadersPadded\n\t}\n\tif p.EndStream {\n\t\tflags |= FlagHeadersEndStream\n\t}\n\tif p.EndHeaders {\n\t\tflags |= FlagHeadersEndHeaders\n\t}\n\tif !p.Priority.IsZero() {\n\t\tflags |= FlagHeadersPriority\n\t}\n\tf.startWrite(FrameHeaders, flags, p.StreamID)\n\tif p.PadLength != 0 {\n\t\tf.writeByte(p.PadLength)\n\t}\n\tif !p.Priority.IsZero() {\n\t\tv := p.Priority.StreamDep\n\t\tif !validStreamID(v) && !f.AllowIllegalWrites {\n\t\t\treturn errors.New(\"invalid dependent stream id\")\n\t\t}\n\t\tif p.Priority.Exclusive {\n\t\t\tv |= 1 << 31\n\t\t}\n\t\tf.writeUint32(v)\n\t\tf.writeByte(p.Priority.Weight)\n\t}\n\tf.wbuf = append(f.wbuf, 
p.BlockFragment...)\n\tf.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)\n\treturn f.endWrite()\n}\n\n// A PriorityFrame specifies the sender-advised priority of a stream.\n// See http://http2.github.io/http2-spec/#rfc.section.6.3\ntype PriorityFrame struct {\n\tFrameHeader\n\tPriorityParam\n}\n\n// PriorityParam are the stream prioritzation parameters.\ntype PriorityParam struct {\n\t// StreamDep is a 31-bit stream identifier for the\n\t// stream that this stream depends on. Zero means no\n\t// dependency.\n\tStreamDep uint32\n\n\t// Exclusive is whether the dependency is exclusive.\n\tExclusive bool\n\n\t// Weight is the stream's zero-indexed weight. It should be\n\t// set together with StreamDep, or neither should be set.  Per\n\t// the spec, \"Add one to the value to obtain a weight between\n\t// 1 and 256.\"\n\tWeight uint8\n}\n\nfunc (p PriorityParam) IsZero() bool {\n\treturn p == PriorityParam{}\n}\n\nfunc parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {\n\tif fh.StreamID == 0 {\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\tif len(payload) != 5 {\n\t\treturn nil, ConnectionError(ErrCodeFrameSize)\n\t}\n\tv := binary.BigEndian.Uint32(payload[:4])\n\tstreamID := v & 0x7fffffff // mask off high bit\n\treturn &PriorityFrame{\n\t\tFrameHeader: fh,\n\t\tPriorityParam: PriorityParam{\n\t\t\tWeight:    payload[4],\n\t\t\tStreamDep: streamID,\n\t\t\tExclusive: streamID != v, // was high bit set?\n\t\t},\n\t}, nil\n}\n\n// WritePriority writes a PRIORITY frame.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility to not call other Write methods concurrently.\nfunc (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {\n\tif !validStreamID(streamID) && !f.AllowIllegalWrites {\n\t\treturn errStreamID\n\t}\n\tf.startWrite(FramePriority, 0, streamID)\n\tv := p.StreamDep\n\tif p.Exclusive {\n\t\tv |= 1 << 31\n\t}\n\tf.writeUint32(v)\n\tf.writeByte(p.Weight)\n\treturn 
f.endWrite()\n}\n\n// A RSTStreamFrame allows for abnormal termination of a stream.\n// See http://http2.github.io/http2-spec/#rfc.section.6.4\ntype RSTStreamFrame struct {\n\tFrameHeader\n\tErrCode ErrCode\n}\n\nfunc parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {\n\tif len(p) != 4 {\n\t\treturn nil, ConnectionError(ErrCodeFrameSize)\n\t}\n\tif fh.StreamID == 0 {\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\treturn &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil\n}\n\n// WriteRSTStream writes a RST_STREAM frame.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility to not call other Write methods concurrently.\nfunc (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error {\n\tif !validStreamID(streamID) && !f.AllowIllegalWrites {\n\t\treturn errStreamID\n\t}\n\tf.startWrite(FrameRSTStream, 0, streamID)\n\tf.writeUint32(uint32(code))\n\treturn f.endWrite()\n}\n\n// A ContinuationFrame is used to continue a sequence of header block fragments.\n// See http://http2.github.io/http2-spec/#rfc.section.6.10\ntype ContinuationFrame struct {\n\tFrameHeader\n\theaderFragBuf []byte\n}\n\nfunc parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {\n\treturn &ContinuationFrame{fh, p}, nil\n}\n\nfunc (f *ContinuationFrame) StreamEnded() bool {\n\treturn f.FrameHeader.Flags.Has(FlagDataEndStream)\n}\n\nfunc (f *ContinuationFrame) HeaderBlockFragment() []byte {\n\tf.checkValid()\n\treturn f.headerFragBuf\n}\n\nfunc (f *ContinuationFrame) HeadersEnded() bool {\n\treturn f.FrameHeader.Flags.Has(FlagContinuationEndHeaders)\n}\n\n// WriteContinuation writes a CONTINUATION frame.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility to not call other Write methods concurrently.\nfunc (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {\n\tif !validStreamID(streamID) && 
!f.AllowIllegalWrites {\n\t\treturn errStreamID\n\t}\n\tvar flags Flags\n\tif endHeaders {\n\t\tflags |= FlagContinuationEndHeaders\n\t}\n\tf.startWrite(FrameContinuation, flags, streamID)\n\tf.wbuf = append(f.wbuf, headerBlockFragment...)\n\treturn f.endWrite()\n}\n\n// A PushPromiseFrame is used to initiate a server stream.\n// See http://http2.github.io/http2-spec/#rfc.section.6.6\ntype PushPromiseFrame struct {\n\tFrameHeader\n\tPromiseID     uint32\n\theaderFragBuf []byte // not owned\n}\n\nfunc (f *PushPromiseFrame) HeaderBlockFragment() []byte {\n\tf.checkValid()\n\treturn f.headerFragBuf\n}\n\nfunc (f *PushPromiseFrame) HeadersEnded() bool {\n\treturn f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)\n}\n\nfunc parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {\n\tpp := &PushPromiseFrame{\n\t\tFrameHeader: fh,\n\t}\n\tif pp.StreamID == 0 {\n\t\t// PUSH_PROMISE frames MUST be associated with an existing,\n\t\t// peer-initiated stream. The stream identifier of a\n\t\t// PUSH_PROMISE frame indicates the stream it is associated\n\t\t// with. 
If the stream identifier field specifies the value\n\t\t// 0x0, a recipient MUST respond with a connection error\n\t\t// (Section 5.4.1) of type PROTOCOL_ERROR.\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\t// The PUSH_PROMISE frame includes optional padding.\n\t// Padding fields and flags are identical to those defined for DATA frames\n\tvar padLength uint8\n\tif fh.Flags.Has(FlagPushPromisePadded) {\n\t\tif p, padLength, err = readByte(p); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tp, pp.PromiseID, err = readUint32(p)\n\tif err != nil {\n\t\treturn\n\t}\n\tpp.PromiseID = pp.PromiseID & (1<<31 - 1)\n\n\tif int(padLength) > len(p) {\n\t\t// like the DATA frame, error out if padding is longer than the body.\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\tpp.headerFragBuf = p[:len(p)-int(padLength)]\n\treturn pp, nil\n}\n\n// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.\ntype PushPromiseParam struct {\n\t// StreamID is the required Stream ID to initiate.\n\tStreamID uint32\n\n\t// PromiseID is the required Stream ID which this\n\t// Push Promises\n\tPromiseID uint32\n\n\t// BlockFragment is part (or all) of a Header Block.\n\tBlockFragment []byte\n\n\t// EndHeaders indicates that this frame contains an entire\n\t// header block and is not followed by any\n\t// CONTINUATION frames.\n\tEndHeaders bool\n\n\t// PadLength is the optional number of bytes of zeros to add\n\t// to this frame.\n\tPadLength uint8\n}\n\n// WritePushPromise writes a single PushPromise Frame.\n//\n// As with Header Frames, This is the low level call for writing\n// individual frames. 
Continuation frames are handled elsewhere.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility to not call other Write methods concurrently.\nfunc (f *Framer) WritePushPromise(p PushPromiseParam) error {\n\tif !validStreamID(p.StreamID) && !f.AllowIllegalWrites {\n\t\treturn errStreamID\n\t}\n\tvar flags Flags\n\tif p.PadLength != 0 {\n\t\tflags |= FlagPushPromisePadded\n\t}\n\tif p.EndHeaders {\n\t\tflags |= FlagPushPromiseEndHeaders\n\t}\n\tf.startWrite(FramePushPromise, flags, p.StreamID)\n\tif p.PadLength != 0 {\n\t\tf.writeByte(p.PadLength)\n\t}\n\tif !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {\n\t\treturn errStreamID\n\t}\n\tf.writeUint32(p.PromiseID)\n\tf.wbuf = append(f.wbuf, p.BlockFragment...)\n\tf.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)\n\treturn f.endWrite()\n}\n\n// WriteRawFrame writes a raw frame. This can be used to write\n// extension frames unknown to this package.\nfunc (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error {\n\tf.startWrite(t, flags, streamID)\n\tf.writeBytes(payload)\n\treturn f.endWrite()\n}\n\nfunc readByte(p []byte) (remain []byte, b byte, err error) {\n\tif len(p) == 0 {\n\t\treturn nil, 0, io.ErrUnexpectedEOF\n\t}\n\treturn p[1:], p[0], nil\n}\n\nfunc readUint32(p []byte) (remain []byte, v uint32, err error) {\n\tif len(p) < 4 {\n\t\treturn nil, 0, io.ErrUnexpectedEOF\n\t}\n\treturn p[4:], binary.BigEndian.Uint32(p[:4]), nil\n}\n\ntype streamEnder interface {\n\tStreamEnded() bool\n}\n\ntype headersEnder interface {\n\tHeadersEnded() bool\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/gotrack.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\n// Defensive debug-only utility to track that functions run on the\n// goroutine that they're supposed to.\n\npackage http2\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"sync\"\n)\n\nvar DebugGoroutines = os.Getenv(\"DEBUG_HTTP2_GOROUTINES\") == \"1\"\n\ntype goroutineLock uint64\n\nfunc newGoroutineLock() goroutineLock {\n\tif !DebugGoroutines {\n\t\treturn 0\n\t}\n\treturn goroutineLock(curGoroutineID())\n}\n\nfunc (g goroutineLock) check() {\n\tif !DebugGoroutines {\n\t\treturn\n\t}\n\tif curGoroutineID() != uint64(g) {\n\t\tpanic(\"running on the wrong goroutine\")\n\t}\n}\n\nfunc (g goroutineLock) checkNotOn() {\n\tif !DebugGoroutines {\n\t\treturn\n\t}\n\tif curGoroutineID() == uint64(g) {\n\t\tpanic(\"running on the wrong goroutine\")\n\t}\n}\n\nvar goroutineSpace = []byte(\"goroutine \")\n\nfunc curGoroutineID() uint64 {\n\tbp := littleBuf.Get().(*[]byte)\n\tdefer littleBuf.Put(bp)\n\tb := *bp\n\tb = b[:runtime.Stack(b, false)]\n\t// Parse the 4707 out of \"goroutine 4707 [\"\n\tb = bytes.TrimPrefix(b, goroutineSpace)\n\ti := bytes.IndexByte(b, ' ')\n\tif i < 0 {\n\t\tpanic(fmt.Sprintf(\"No space found in %q\", b))\n\t}\n\tb = b[:i]\n\tn, err := parseUintBytes(b, 10, 64)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to parse goroutine ID out of %q: %v\", b, err))\n\t}\n\treturn n\n}\n\nvar littleBuf = sync.Pool{\n\tNew: func() interface{} {\n\t\tbuf := make([]byte, 64)\n\t\treturn &buf\n\t},\n}\n\n// parseUintBytes is like strconv.ParseUint, but using a []byte.\nfunc parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {\n\tvar cutoff, maxVal 
uint64\n\n\tif bitSize == 0 {\n\t\tbitSize = int(strconv.IntSize)\n\t}\n\n\ts0 := s\n\tswitch {\n\tcase len(s) < 1:\n\t\terr = strconv.ErrSyntax\n\t\tgoto Error\n\n\tcase 2 <= base && base <= 36:\n\t\t// valid base; nothing to do\n\n\tcase base == 0:\n\t\t// Look for octal, hex prefix.\n\t\tswitch {\n\t\tcase s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):\n\t\t\tbase = 16\n\t\t\ts = s[2:]\n\t\t\tif len(s) < 1 {\n\t\t\t\terr = strconv.ErrSyntax\n\t\t\t\tgoto Error\n\t\t\t}\n\t\tcase s[0] == '0':\n\t\t\tbase = 8\n\t\tdefault:\n\t\t\tbase = 10\n\t\t}\n\n\tdefault:\n\t\terr = errors.New(\"invalid base \" + strconv.Itoa(base))\n\t\tgoto Error\n\t}\n\n\tn = 0\n\tcutoff = cutoff64(base)\n\tmaxVal = 1<<uint(bitSize) - 1\n\n\tfor i := 0; i < len(s); i++ {\n\t\tvar v byte\n\t\td := s[i]\n\t\tswitch {\n\t\tcase '0' <= d && d <= '9':\n\t\t\tv = d - '0'\n\t\tcase 'a' <= d && d <= 'z':\n\t\t\tv = d - 'a' + 10\n\t\tcase 'A' <= d && d <= 'Z':\n\t\t\tv = d - 'A' + 10\n\t\tdefault:\n\t\t\tn = 0\n\t\t\terr = strconv.ErrSyntax\n\t\t\tgoto Error\n\t\t}\n\t\tif int(v) >= base {\n\t\t\tn = 0\n\t\t\terr = strconv.ErrSyntax\n\t\t\tgoto Error\n\t\t}\n\n\t\tif n >= cutoff {\n\t\t\t// n*base overflows\n\t\t\tn = 1<<64 - 1\n\t\t\terr = strconv.ErrRange\n\t\t\tgoto Error\n\t\t}\n\t\tn *= uint64(base)\n\n\t\tn1 := n + uint64(v)\n\t\tif n1 < n || n1 > maxVal {\n\t\t\t// n+v overflows\n\t\t\tn = 1<<64 - 1\n\t\t\terr = strconv.ErrRange\n\t\t\tgoto Error\n\t\t}\n\t\tn = n1\n\t}\n\n\treturn n, nil\n\nError:\n\treturn n, &strconv.NumError{Func: \"ParseUint\", Num: string(s0), Err: err}\n}\n\n// Return the first number n such that n*base >= 1<<64.\nfunc cutoff64(base int) uint64 {\n\tif base < 2 {\n\t\treturn 0\n\t}\n\treturn (1<<64-1)/uint64(base) + 1\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/h2i/README.md",
    "content": "# h2i\n\n**h2i** is an interactive HTTP/2 (\"h2\") console debugger. Miss the good ol'\ndays of telnetting to your HTTP/1.n servers? We're bringing you\nback.\n\nFeatures:\n- send raw HTTP/2 frames\n - PING\n - SETTINGS\n - HEADERS\n - etc\n- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2\n- pretty print all received HTTP/2 frames from the peer (including HPACK decoding)\n- tab completion of commands, options\n\nNot yet features, but soon:\n- unnecessary CONTINUATION frames on short boundaries, to test peer implementations \n- request bodies (DATA frames)\n- send invalid frames for testing server implementations (supported by underlying Framer)\n\nLater:\n- act like a server\n\n## Installation\n\n```\n$ go get github.com/bradfitz/http2/h2i\n$ h2i <host>\n```\n\n## Demo\n\n```\n$ h2i\nUsage: h2i <hostname>\n  \n  -insecure\n        Whether to skip TLS cert validation\n  -nextproto string\n        Comma-separated list of NPN/ALPN protocol names to negotiate. 
(default \"h2,h2-14\")\n\n$ h2i google.com\nConnecting to google.com:443 ...\nConnected to 74.125.224.41:443\nNegotiated protocol \"h2-14\"\n[FrameHeader SETTINGS len=18]\n  [MAX_CONCURRENT_STREAMS = 100]\n  [INITIAL_WINDOW_SIZE = 1048576]\n  [MAX_FRAME_SIZE = 16384]\n[FrameHeader WINDOW_UPDATE len=4]\n  Window-Increment = 983041\n  \nh2i> PING h2iSayHI\n[FrameHeader PING flags=ACK len=8]\n  Data = \"h2iSayHI\"\nh2i> headers\n(as HTTP/1.1)> GET / HTTP/1.1\n(as HTTP/1.1)> Host: ip.appspot.com\n(as HTTP/1.1)> User-Agent: h2i/brad-n-blake\n(as HTTP/1.1)>  \nOpening Stream-ID 1:\n :authority = ip.appspot.com\n :method = GET\n :path = /\n :scheme = https\n user-agent = h2i/brad-n-blake\n[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77]\n  :status = \"200\"\n  alternate-protocol = \"443:quic,p=1\"\n  content-length = \"15\"\n  content-type = \"text/html\"\n  date = \"Fri, 01 May 2015 23:06:56 GMT\"\n  server = \"Google Frontend\"\n[FrameHeader DATA flags=END_STREAM stream=1 len=15]\n  \"173.164.155.78\\n\"\n[FrameHeader PING len=8]\n  Data = \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\nh2i> ping  \n[FrameHeader PING flags=ACK len=8]  \n  Data = \"h2i_ping\"  \nh2i> ping  \n[FrameHeader PING flags=ACK len=8]\n  Data = \"h2i_ping\"\nh2i> ping\n[FrameHeader GOAWAY len=22]\n  Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1)\n\nReadFrame: EOF\n```\n\n## Status\n\nQuick few hour hack. So much yet to do. Feel free to file issues for\nbugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/)\nand I aren't yet accepting pull requests until things settle down.\n\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/h2i/h2i.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\n/*\nThe h2i command is an interactive HTTP/2 console.\n\nUsage:\n  $ h2i [flags] <hostname>\n\nInteractive commands in the console: (all parts case-insensitive)\n\n  ping [data]\n  settings ack\n  settings FOO=n BAR=z\n  headers      (open a new stream by typing HTTP/1.1)\n*/\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/bradfitz/http2\"\n\t\"github.com/bradfitz/http2/hpack\"\n\t\"golang.org/x/crypto/ssh/terminal\"\n)\n\n// Flags\nvar (\n\tflagNextProto = flag.String(\"nextproto\", \"h2,h2-14\", \"Comma-separated list of NPN/ALPN protocol names to negotiate.\")\n\tflagInsecure  = flag.Bool(\"insecure\", false, \"Whether to skip TLS cert validation\")\n)\n\ntype command struct {\n\trun func(*h2i, []string) error // required\n\n\t// complete optionally specifies tokens (case-insensitive) which are\n\t// valid for this subcommand.\n\tcomplete func() []string\n}\n\nvar commands = map[string]command{\n\t\"ping\": command{run: (*h2i).cmdPing},\n\t\"settings\": command{\n\t\trun: (*h2i).cmdSettings,\n\t\tcomplete: func() []string {\n\t\t\treturn []string{\n\t\t\t\t\"ACK\",\n\t\t\t\thttp2.SettingHeaderTableSize.String(),\n\t\t\t\thttp2.SettingEnablePush.String(),\n\t\t\t\thttp2.SettingMaxConcurrentStreams.String(),\n\t\t\t\thttp2.SettingInitialWindowSize.String(),\n\t\t\t\thttp2.SettingMaxFrameSize.String(),\n\t\t\t\thttp2.SettingMaxHeaderListSize.String(),\n\t\t\t}\n\t\t},\n\t},\n\t\"quit\":    command{run: (*h2i).cmdQuit},\n\t\"headers\": command{run: 
(*h2i).cmdHeaders},\n}\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"Usage: h2i <hostname>\\n\\n\")\n\tflag.PrintDefaults()\n\tos.Exit(1)\n}\n\n// withPort adds \":443\" if another port isn't already present.\nfunc withPort(host string) string {\n\tif _, _, err := net.SplitHostPort(host); err != nil {\n\t\treturn net.JoinHostPort(host, \"443\")\n\t}\n\treturn host\n}\n\n// h2i is the app's state.\ntype h2i struct {\n\thost   string\n\ttc     *tls.Conn\n\tframer *http2.Framer\n\tterm   *terminal.Terminal\n\n\t// owned by the command loop:\n\tstreamID uint32\n\thbuf     bytes.Buffer\n\thenc     *hpack.Encoder\n\n\t// owned by the readFrames loop:\n\tpeerSetting map[http2.SettingID]uint32\n\thdec        *hpack.Decoder\n}\n\nfunc main() {\n\tflag.Usage = usage\n\tflag.Parse()\n\tif flag.NArg() != 1 {\n\t\tusage()\n\t}\n\tlog.SetFlags(0)\n\n\thost := flag.Arg(0)\n\tapp := &h2i{\n\t\thost:        host,\n\t\tpeerSetting: make(map[http2.SettingID]uint32),\n\t}\n\tapp.henc = hpack.NewEncoder(&app.hbuf)\n\n\tif err := app.Main(); err != nil {\n\t\tif app.term != nil {\n\t\t\tapp.logf(\"%v\\n\", err)\n\t\t} else {\n\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t}\n\t\tos.Exit(1)\n\t}\n\tfmt.Fprintf(os.Stdout, \"\\n\")\n}\n\nfunc (app *h2i) Main() error {\n\tcfg := &tls.Config{\n\t\tServerName:         app.host,\n\t\tNextProtos:         strings.Split(*flagNextProto, \",\"),\n\t\tInsecureSkipVerify: *flagInsecure,\n\t}\n\n\thostAndPort := withPort(app.host)\n\tlog.Printf(\"Connecting to %s ...\", hostAndPort)\n\ttc, err := tls.Dial(\"tcp\", hostAndPort, cfg)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error dialing %s: %v\", withPort(app.host), err)\n\t}\n\tlog.Printf(\"Connected to %v\", tc.RemoteAddr())\n\tdefer tc.Close()\n\n\tif err := tc.Handshake(); err != nil {\n\t\treturn fmt.Errorf(\"TLS handshake: %v\", err)\n\t}\n\tif !*flagInsecure {\n\t\tif err := tc.VerifyHostname(app.host); err != nil {\n\t\t\treturn fmt.Errorf(\"VerifyHostname: %v\", err)\n\t\t}\n\t}\n\tstate 
:= tc.ConnectionState()\n\tlog.Printf(\"Negotiated protocol %q\", state.NegotiatedProtocol)\n\tif !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == \"\" {\n\t\treturn fmt.Errorf(\"Could not negotiate protocol mutually\")\n\t}\n\n\tif _, err := io.WriteString(tc, http2.ClientPreface); err != nil {\n\t\treturn err\n\t}\n\n\tapp.framer = http2.NewFramer(tc, tc)\n\n\toldState, err := terminal.MakeRaw(0)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer terminal.Restore(0, oldState)\n\n\tvar screen = struct {\n\t\tio.Reader\n\t\tio.Writer\n\t}{os.Stdin, os.Stdout}\n\n\tapp.term = terminal.NewTerminal(screen, \"h2i> \")\n\tlastWord := regexp.MustCompile(`.+\\W(\\w+)$`)\n\tapp.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) {\n\t\tif key != '\\t' {\n\t\t\treturn\n\t\t}\n\t\tif pos != len(line) {\n\t\t\t// TODO: we're being lazy for now, only supporting tab completion at the end.\n\t\t\treturn\n\t\t}\n\t\t// Auto-complete for the command itself.\n\t\tif !strings.Contains(line, \" \") {\n\t\t\tvar name string\n\t\t\tname, _, ok = lookupCommand(line)\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\treturn name, len(name), true\n\t\t}\n\t\t_, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')])\n\t\tif !ok || c.complete == nil {\n\t\t\treturn\n\t\t}\n\t\tif strings.HasSuffix(line, \" \") {\n\t\t\tapp.logf(\"%s\", strings.Join(c.complete(), \" \"))\n\t\t\treturn line, pos, true\n\t\t}\n\t\tm := lastWord.FindStringSubmatch(line)\n\t\tif m == nil {\n\t\t\treturn line, len(line), true\n\t\t}\n\t\tsoFar := m[1]\n\t\tvar match []string\n\t\tfor _, cand := range c.complete() {\n\t\t\tif len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmatch = append(match, cand)\n\t\t}\n\t\tif len(match) == 0 {\n\t\t\treturn\n\t\t}\n\t\tif len(match) > 1 {\n\t\t\t// TODO: auto-complete any common prefix\n\t\t\tapp.logf(\"%s\", strings.Join(match, \" \"))\n\t\t\treturn 
line, pos, true\n\t\t}\n\t\tnewLine = line[:len(line)-len(soFar)] + match[0]\n\t\treturn newLine, len(newLine), true\n\n\t}\n\n\terrc := make(chan error, 2)\n\tgo func() { errc <- app.readFrames() }()\n\tgo func() { errc <- app.readConsole() }()\n\treturn <-errc\n}\n\nfunc (app *h2i) logf(format string, args ...interface{}) {\n\tfmt.Fprintf(app.term, format+\"\\n\", args...)\n}\n\nfunc (app *h2i) readConsole() error {\n\tfor {\n\t\tline, err := app.term.ReadLine()\n\t\tif err == io.EOF {\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"terminal.ReadLine: %v\", err)\n\t\t}\n\t\tf := strings.Fields(line)\n\t\tif len(f) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tcmd, args := f[0], f[1:]\n\t\tif _, c, ok := lookupCommand(cmd); ok {\n\t\t\terr = c.run(app, args)\n\t\t} else {\n\t\t\tapp.logf(\"Unknown command %q\", line)\n\t\t}\n\t\tif err == errExitApp {\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\nfunc lookupCommand(prefix string) (name string, c command, ok bool) {\n\tprefix = strings.ToLower(prefix)\n\tif c, ok = commands[prefix]; ok {\n\t\treturn prefix, c, ok\n\t}\n\n\tfor full, candidate := range commands {\n\t\tif strings.HasPrefix(full, prefix) {\n\t\t\tif c.run != nil {\n\t\t\t\treturn \"\", command{}, false // ambiguous\n\t\t\t}\n\t\t\tc = candidate\n\t\t\tname = full\n\t\t}\n\t}\n\treturn name, c, c.run != nil\n}\n\nvar errExitApp = errors.New(\"internal sentinel error value to quit the console reading loop\")\n\nfunc (a *h2i) cmdQuit(args []string) error {\n\tif len(args) > 0 {\n\t\ta.logf(\"the QUIT command takes no argument\")\n\t\treturn nil\n\t}\n\treturn errExitApp\n}\n\nfunc (a *h2i) cmdSettings(args []string) error {\n\tif len(args) == 1 && strings.EqualFold(args[0], \"ACK\") {\n\t\treturn a.framer.WriteSettingsAck()\n\t}\n\tvar settings []http2.Setting\n\tfor _, arg := range args {\n\t\tif strings.EqualFold(arg, \"ACK\") {\n\t\t\ta.logf(\"Error: ACK must be only argument with the SETTINGS 
command\")\n\t\t\treturn nil\n\t\t}\n\t\teq := strings.Index(arg, \"=\")\n\t\tif eq == -1 {\n\t\t\ta.logf(\"Error: invalid argument %q (expected SETTING_NAME=nnnn)\", arg)\n\t\t\treturn nil\n\t\t}\n\t\tsid, ok := settingByName(arg[:eq])\n\t\tif !ok {\n\t\t\ta.logf(\"Error: unknown setting name %q\", arg[:eq])\n\t\t\treturn nil\n\t\t}\n\t\tval, err := strconv.ParseUint(arg[eq+1:], 10, 32)\n\t\tif err != nil {\n\t\t\ta.logf(\"Error: invalid argument %q (expected SETTING_NAME=nnnn)\", arg)\n\t\t\treturn nil\n\t\t}\n\t\tsettings = append(settings, http2.Setting{\n\t\t\tID:  sid,\n\t\t\tVal: uint32(val),\n\t\t})\n\t}\n\ta.logf(\"Sending: %v\", settings)\n\treturn a.framer.WriteSettings(settings...)\n}\n\nfunc settingByName(name string) (http2.SettingID, bool) {\n\tfor _, sid := range [...]http2.SettingID{\n\t\thttp2.SettingHeaderTableSize,\n\t\thttp2.SettingEnablePush,\n\t\thttp2.SettingMaxConcurrentStreams,\n\t\thttp2.SettingInitialWindowSize,\n\t\thttp2.SettingMaxFrameSize,\n\t\thttp2.SettingMaxHeaderListSize,\n\t} {\n\t\tif strings.EqualFold(sid.String(), name) {\n\t\t\treturn sid, true\n\t\t}\n\t}\n\treturn 0, false\n}\n\nfunc (app *h2i) cmdPing(args []string) error {\n\tif len(args) > 1 {\n\t\tapp.logf(\"invalid PING usage: only accepts 0 or 1 args\")\n\t\treturn nil // nil means don't end the program\n\t}\n\tvar data [8]byte\n\tif len(args) == 1 {\n\t\tcopy(data[:], args[0])\n\t} else {\n\t\tcopy(data[:], \"h2i_ping\")\n\t}\n\treturn app.framer.WritePing(false, data)\n}\n\nfunc (app *h2i) cmdHeaders(args []string) error {\n\tif len(args) > 0 {\n\t\tapp.logf(\"Error: HEADERS doesn't yet take arguments.\")\n\t\t// TODO: flags for restricting window size, to force CONTINUATION\n\t\t// frames.\n\t\treturn nil\n\t}\n\tvar h1req bytes.Buffer\n\tapp.term.SetPrompt(\"(as HTTP/1.1)> \")\n\tdefer app.term.SetPrompt(\"h2i> \")\n\tfor {\n\t\tline, err := app.term.ReadLine()\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\th1req.WriteString(line)\n\t\th1req.WriteString(\"\\r\\n\")\n\t\tif line == \"\" {\n\t\t\tbreak\n\t\t}\n\t}\n\treq, err := http.ReadRequest(bufio.NewReader(&h1req))\n\tif err != nil {\n\t\tapp.logf(\"Invalid HTTP/1.1 request: %v\", err)\n\t\treturn nil\n\t}\n\tif app.streamID == 0 {\n\t\tapp.streamID = 1\n\t} else {\n\t\tapp.streamID += 2\n\t}\n\tapp.logf(\"Opening Stream-ID %d:\", app.streamID)\n\thbf := app.encodeHeaders(req)\n\tif len(hbf) > 16<<10 {\n\t\tapp.logf(\"TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go\")\n\t\treturn nil\n\t}\n\treturn app.framer.WriteHeaders(http2.HeadersFrameParam{\n\t\tStreamID:      app.streamID,\n\t\tBlockFragment: hbf,\n\t\tEndStream:     req.Method == \"GET\" || req.Method == \"HEAD\", // good enough for now\n\t\tEndHeaders:    true,                                        // for now\n\t})\n}\n\nfunc (app *h2i) readFrames() error {\n\tfor {\n\t\tf, err := app.framer.ReadFrame()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"ReadFrame: %v\", err)\n\t\t}\n\t\tapp.logf(\"%v\", f)\n\t\tswitch f := f.(type) {\n\t\tcase *http2.PingFrame:\n\t\t\tapp.logf(\"  Data = %q\", f.Data)\n\t\tcase *http2.SettingsFrame:\n\t\t\tf.ForeachSetting(func(s http2.Setting) error {\n\t\t\t\tapp.logf(\"  %v\", s)\n\t\t\t\tapp.peerSetting[s.ID] = s.Val\n\t\t\t\treturn nil\n\t\t\t})\n\t\tcase *http2.WindowUpdateFrame:\n\t\t\tapp.logf(\"  Window-Increment = %v\\n\", f.Increment)\n\t\tcase *http2.GoAwayFrame:\n\t\t\tapp.logf(\"  Last-Stream-ID = %d; Error-Code = %v (%d)\\n\", f.LastStreamID, f.ErrCode, f.ErrCode)\n\t\tcase *http2.DataFrame:\n\t\t\tapp.logf(\"  %q\", f.Data())\n\t\tcase *http2.HeadersFrame:\n\t\t\tif f.HasPriority() {\n\t\t\t\tapp.logf(\"  PRIORITY = %v\", f.Priority)\n\t\t\t}\n\t\t\tif app.hdec == nil {\n\t\t\t\t// TODO: if the user uses h2i to send a SETTINGS frame advertising\n\t\t\t\t// something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE\n\t\t\t\t// and stuff here instead of using 
the 4k default. But for now:\n\t\t\t\ttableSize := uint32(4 << 10)\n\t\t\t\tapp.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField)\n\t\t\t}\n\t\t\tapp.hdec.Write(f.HeaderBlockFragment())\n\t\t}\n\t}\n}\n\n// called from readLoop\nfunc (app *h2i) onNewHeaderField(f hpack.HeaderField) {\n\tif f.Sensitive {\n\t\tapp.logf(\"  %s = %q (SENSITIVE)\", f.Name, f.Value)\n\t}\n\tapp.logf(\"  %s = %q\", f.Name, f.Value)\n}\n\nfunc (app *h2i) encodeHeaders(req *http.Request) []byte {\n\tapp.hbuf.Reset()\n\n\t// TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go\n\thost := req.Host\n\tif host == \"\" {\n\t\thost = req.URL.Host\n\t}\n\n\tpath := req.URL.Path\n\tif path == \"\" {\n\t\tpath = \"/\"\n\t}\n\n\tapp.writeHeader(\":authority\", host) // probably not right for all sites\n\tapp.writeHeader(\":method\", req.Method)\n\tapp.writeHeader(\":path\", path)\n\tapp.writeHeader(\":scheme\", \"https\")\n\n\tfor k, vv := range req.Header {\n\t\tlowKey := strings.ToLower(k)\n\t\tif lowKey == \"host\" {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, v := range vv {\n\t\t\tapp.writeHeader(lowKey, v)\n\t\t}\n\t}\n\treturn app.hbuf.Bytes()\n}\n\nfunc (app *h2i) writeHeader(name, value string) {\n\tapp.henc.WriteField(hpack.HeaderField{Name: name, Value: value})\n\tapp.logf(\" %s = %s\", name, value)\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/headermap.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\npackage http2\n\nimport (\n\t\"net/http\"\n\t\"strings\"\n)\n\nvar (\n\tcommonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case\n\tcommonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case\n)\n\nfunc init() {\n\tfor _, v := range []string{\n\t\t\"accept\",\n\t\t\"accept-charset\",\n\t\t\"accept-encoding\",\n\t\t\"accept-language\",\n\t\t\"accept-ranges\",\n\t\t\"age\",\n\t\t\"access-control-allow-origin\",\n\t\t\"allow\",\n\t\t\"authorization\",\n\t\t\"cache-control\",\n\t\t\"content-disposition\",\n\t\t\"content-encoding\",\n\t\t\"content-language\",\n\t\t\"content-length\",\n\t\t\"content-location\",\n\t\t\"content-range\",\n\t\t\"content-type\",\n\t\t\"cookie\",\n\t\t\"date\",\n\t\t\"etag\",\n\t\t\"expect\",\n\t\t\"expires\",\n\t\t\"from\",\n\t\t\"host\",\n\t\t\"if-match\",\n\t\t\"if-modified-since\",\n\t\t\"if-none-match\",\n\t\t\"if-unmodified-since\",\n\t\t\"last-modified\",\n\t\t\"link\",\n\t\t\"location\",\n\t\t\"max-forwards\",\n\t\t\"proxy-authenticate\",\n\t\t\"proxy-authorization\",\n\t\t\"range\",\n\t\t\"referer\",\n\t\t\"refresh\",\n\t\t\"retry-after\",\n\t\t\"server\",\n\t\t\"set-cookie\",\n\t\t\"strict-transport-security\",\n\t\t\"transfer-encoding\",\n\t\t\"user-agent\",\n\t\t\"vary\",\n\t\t\"via\",\n\t\t\"www-authenticate\",\n\t} {\n\t\tchk := http.CanonicalHeaderKey(v)\n\t\tcommonLowerHeader[chk] = v\n\t\tcommonCanonHeader[v] = chk\n\t}\n}\n\nfunc lowerHeader(v string) string {\n\tif s, ok := commonLowerHeader[v]; ok {\n\t\treturn s\n\t}\n\treturn strings.ToLower(v)\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/hpack/encode.go",
    "content": "// Copyright 2014 The Go Authors.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\npackage hpack\n\nimport (\n\t\"io\"\n)\n\nconst (\n\tuint32Max              = ^uint32(0)\n\tinitialHeaderTableSize = 4096\n)\n\ntype Encoder struct {\n\tdynTab dynamicTable\n\t// minSize is the minimum table size set by\n\t// SetMaxDynamicTableSize after the previous Header Table Size\n\t// Update.\n\tminSize uint32\n\t// maxSizeLimit is the maximum table size this encoder\n\t// supports. This will protect the encoder from too large\n\t// size.\n\tmaxSizeLimit uint32\n\t// tableSizeUpdate indicates whether \"Header Table Size\n\t// Update\" is required.\n\ttableSizeUpdate bool\n\tw               io.Writer\n\tbuf             []byte\n}\n\n// NewEncoder returns a new Encoder which performs HPACK encoding. An\n// encoded data is written to w.\nfunc NewEncoder(w io.Writer) *Encoder {\n\te := &Encoder{\n\t\tminSize:         uint32Max,\n\t\tmaxSizeLimit:    initialHeaderTableSize,\n\t\ttableSizeUpdate: false,\n\t\tw:               w,\n\t}\n\te.dynTab.setMaxSize(initialHeaderTableSize)\n\treturn e\n}\n\n// WriteField encodes f into a single Write to e's underlying Writer.\n// This function may also produce bytes for \"Header Table Size Update\"\n// if necessary.  
If produced, it is done before encoding f.\nfunc (e *Encoder) WriteField(f HeaderField) error {\n\te.buf = e.buf[:0]\n\n\tif e.tableSizeUpdate {\n\t\te.tableSizeUpdate = false\n\t\tif e.minSize < e.dynTab.maxSize {\n\t\t\te.buf = appendTableSize(e.buf, e.minSize)\n\t\t}\n\t\te.minSize = uint32Max\n\t\te.buf = appendTableSize(e.buf, e.dynTab.maxSize)\n\t}\n\n\tidx, nameValueMatch := e.searchTable(f)\n\tif nameValueMatch {\n\t\te.buf = appendIndexed(e.buf, idx)\n\t} else {\n\t\tindexing := e.shouldIndex(f)\n\t\tif indexing {\n\t\t\te.dynTab.add(f)\n\t\t}\n\n\t\tif idx == 0 {\n\t\t\te.buf = appendNewName(e.buf, f, indexing)\n\t\t} else {\n\t\t\te.buf = appendIndexedName(e.buf, f, idx, indexing)\n\t\t}\n\t}\n\tn, err := e.w.Write(e.buf)\n\tif err == nil && n != len(e.buf) {\n\t\terr = io.ErrShortWrite\n\t}\n\treturn err\n}\n\n// searchTable searches f in both stable and dynamic header tables.\n// The static header table is searched first. Only when there is no\n// exact match for both name and value, the dynamic header table is\n// then searched. If there is no match, i is 0. If both name and value\n// match, i is the matched index and nameValueMatch becomes true. 
If\n// only name matches, i points to that index and nameValueMatch\n// becomes false.\nfunc (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {\n\tfor idx, hf := range staticTable {\n\t\tif !constantTimeStringCompare(hf.Name, f.Name) {\n\t\t\tcontinue\n\t\t}\n\t\tif i == 0 {\n\t\t\ti = uint64(idx + 1)\n\t\t}\n\t\tif f.Sensitive {\n\t\t\tcontinue\n\t\t}\n\t\tif !constantTimeStringCompare(hf.Value, f.Value) {\n\t\t\tcontinue\n\t\t}\n\t\ti = uint64(idx + 1)\n\t\tnameValueMatch = true\n\t\treturn\n\t}\n\n\tj, nameValueMatch := e.dynTab.search(f)\n\tif nameValueMatch || (i == 0 && j != 0) {\n\t\ti = j + uint64(len(staticTable))\n\t}\n\treturn\n}\n\n// SetMaxDynamicTableSize changes the dynamic header table size to v.\n// The actual size is bounded by the value passed to\n// SetMaxDynamicTableSizeLimit.\nfunc (e *Encoder) SetMaxDynamicTableSize(v uint32) {\n\tif v > e.maxSizeLimit {\n\t\tv = e.maxSizeLimit\n\t}\n\tif v < e.minSize {\n\t\te.minSize = v\n\t}\n\te.tableSizeUpdate = true\n\te.dynTab.setMaxSize(v)\n}\n\n// SetMaxDynamicTableSizeLimit changes the maximum value that can be\n// specified in SetMaxDynamicTableSize to v. By default, it is set to\n// 4096, which is the same size of the default dynamic header table\n// size described in HPACK specification. 
If the current maximum\n// dynamic header table size is strictly greater than v, \"Header Table\n// Size Update\" will be done in the next WriteField call and the\n// maximum dynamic header table size is truncated to v.\nfunc (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {\n\te.maxSizeLimit = v\n\tif e.dynTab.maxSize > v {\n\t\te.tableSizeUpdate = true\n\t\te.dynTab.setMaxSize(v)\n\t}\n}\n\n// shouldIndex reports whether f should be indexed.\nfunc (e *Encoder) shouldIndex(f HeaderField) bool {\n\treturn !f.Sensitive && f.size() <= e.dynTab.maxSize\n}\n\n// appendIndexed appends index i, as encoded in \"Indexed Header Field\"\n// representation, to dst and returns the extended buffer.\nfunc appendIndexed(dst []byte, i uint64) []byte {\n\tfirst := len(dst)\n\tdst = appendVarInt(dst, 7, i)\n\tdst[first] |= 0x80\n\treturn dst\n}\n\n// appendNewName appends f, as encoded in one of \"Literal Header field\n// - New Name\" representation variants, to dst and returns the\n// extended buffer.\n//\n// If f.Sensitive is true, \"Never Indexed\" representation is used. If\n// f.Sensitive is false and indexing is true, \"Inremental Indexing\"\n// representation is used.\nfunc appendNewName(dst []byte, f HeaderField, indexing bool) []byte {\n\tdst = append(dst, encodeTypeByte(indexing, f.Sensitive))\n\tdst = appendHpackString(dst, f.Name)\n\treturn appendHpackString(dst, f.Value)\n}\n\n// appendIndexedName appends f and index i referring indexed name\n// entry, as encoded in one of \"Literal Header field - Indexed Name\"\n// representation variants, to dst and returns the extended buffer.\n//\n// If f.Sensitive is true, \"Never Indexed\" representation is used. 
If\n// f.Sensitive is false and indexing is true, \"Incremental Indexing\"\n// representation is used.\nfunc appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {\n\tfirst := len(dst)\n\tvar n byte\n\tif indexing {\n\t\tn = 6\n\t} else {\n\t\tn = 4\n\t}\n\tdst = appendVarInt(dst, n, i)\n\tdst[first] |= encodeTypeByte(indexing, f.Sensitive)\n\treturn appendHpackString(dst, f.Value)\n}\n\n// appendTableSize appends v, as encoded in \"Header Table Size Update\"\n// representation, to dst and returns the extended buffer.\nfunc appendTableSize(dst []byte, v uint32) []byte {\n\tfirst := len(dst)\n\tdst = appendVarInt(dst, 5, uint64(v))\n\tdst[first] |= 0x20\n\treturn dst\n}\n\n// appendVarInt appends i, as encoded in variable integer form using n\n// bit prefix, to dst and returns the extended buffer.\n//\n// See\n// http://http2.github.io/http2-spec/compression.html#integer.representation\nfunc appendVarInt(dst []byte, n byte, i uint64) []byte {\n\tk := uint64((1 << n) - 1)\n\tif i < k {\n\t\treturn append(dst, byte(i))\n\t}\n\tdst = append(dst, byte(k))\n\ti -= k\n\tfor ; i >= 128; i >>= 7 {\n\t\tdst = append(dst, byte(0x80|(i&0x7f)))\n\t}\n\treturn append(dst, byte(i))\n}\n\n// appendHpackString appends s, as encoded in \"String Literal\"\n// representation, to dst and returns the the extended buffer.\n//\n// s will be encoded in Huffman codes only when it produces strictly\n// shorter byte string.\nfunc appendHpackString(dst []byte, s string) []byte {\n\thuffmanLength := HuffmanEncodeLength(s)\n\tif huffmanLength < uint64(len(s)) {\n\t\tfirst := len(dst)\n\t\tdst = appendVarInt(dst, 7, huffmanLength)\n\t\tdst = AppendHuffmanString(dst, s)\n\t\tdst[first] |= 0x80\n\t} else {\n\t\tdst = appendVarInt(dst, 7, uint64(len(s)))\n\t\tdst = append(dst, s...)\n\t}\n\treturn dst\n}\n\n// encodeTypeByte returns type byte. If sensitive is true, type byte\n// for \"Never Indexed\" representation is returned. 
If sensitive is\n// false and indexing is true, type byte for \"Incremental Indexing\"\n// representation is returned. Otherwise, type byte for \"Without\n// Indexing\" is returned.\nfunc encodeTypeByte(indexing, sensitive bool) byte {\n\tif sensitive {\n\t\treturn 0x10\n\t}\n\tif indexing {\n\t\treturn 0x40\n\t}\n\treturn 0\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/hpack/hpack.go",
    "content": "// Copyright 2014 The Go Authors.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\n// Package hpack implements HPACK, a compression format for\n// efficiently representing HTTP header fields in the context of HTTP/2.\n//\n// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09\npackage hpack\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n)\n\n// A DecodingError is something the spec defines as a decoding error.\ntype DecodingError struct {\n\tErr error\n}\n\nfunc (de DecodingError) Error() string {\n\treturn fmt.Sprintf(\"decoding error: %v\", de.Err)\n}\n\n// An InvalidIndexError is returned when an encoder references a table\n// entry before the static table or after the end of the dynamic table.\ntype InvalidIndexError int\n\nfunc (e InvalidIndexError) Error() string {\n\treturn fmt.Sprintf(\"invalid indexed representation index %d\", int(e))\n}\n\n// A HeaderField is a name-value pair. Both the name and value are\n// treated as opaque sequences of octets.\ntype HeaderField struct {\n\tName, Value string\n\n\t// Sensitive means that this header field should never be\n\t// indexed.\n\tSensitive bool\n}\n\nfunc (hf *HeaderField) size() uint32 {\n\t// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1\n\t// \"The size of the dynamic table is the sum of the size of\n\t// its entries.  The size of an entry is the sum of its name's\n\t// length in octets (as defined in Section 5.2), its value's\n\t// length in octets (see Section 5.2), plus 32.  
The size of\n\t// an entry is calculated using the length of the name and\n\t// value without any Huffman encoding applied.\"\n\n\t// This can overflow if somebody makes a large HeaderField\n\t// Name and/or Value by hand, but we don't care, because that\n\t// won't happen on the wire because the encoding doesn't allow\n\t// it.\n\treturn uint32(len(hf.Name) + len(hf.Value) + 32)\n}\n\n// A Decoder is the decoding context for incremental processing of\n// header blocks.\ntype Decoder struct {\n\tdynTab dynamicTable\n\temit   func(f HeaderField)\n\n\t// buf is the unparsed buffer. It's only written to\n\t// saveBuf if it was truncated in the middle of a header\n\t// block. Because it's usually not owned, we can only\n\t// process it under Write.\n\tbuf     []byte // usually not owned\n\tsaveBuf bytes.Buffer\n}\n\nfunc NewDecoder(maxSize uint32, emitFunc func(f HeaderField)) *Decoder {\n\td := &Decoder{\n\t\temit: emitFunc,\n\t}\n\td.dynTab.allowedMaxSize = maxSize\n\td.dynTab.setMaxSize(maxSize)\n\treturn d\n}\n\n// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their\n// underlying buffers for garbage reasons.\n\nfunc (d *Decoder) SetMaxDynamicTableSize(v uint32) {\n\td.dynTab.setMaxSize(v)\n}\n\n// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded\n// stream (via dynamic table size updates) may set the maximum size\n// to.\nfunc (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {\n\td.dynTab.allowedMaxSize = v\n}\n\ntype dynamicTable struct {\n\t// ents is the FIFO described at\n\t// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2\n\t// The newest (low index) is append at the end, and items are\n\t// evicted from the front.\n\tents           []HeaderField\n\tsize           uint32\n\tmaxSize        uint32 // current maxSize\n\tallowedMaxSize uint32 // maxSize may go up to this, inclusive\n}\n\nfunc (dt *dynamicTable) setMaxSize(v uint32) {\n\tdt.maxSize = v\n\tdt.evict()\n}\n\n// TODO: 
change dynamicTable to be a struct with a slice and a size int field,\n// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:\n//\n//\n// Then make add increment the size. maybe the max size should move from Decoder to\n// dynamicTable and add should return an ok bool if there was enough space.\n//\n// Later we'll need a remove operation on dynamicTable.\n\nfunc (dt *dynamicTable) add(f HeaderField) {\n\tdt.ents = append(dt.ents, f)\n\tdt.size += f.size()\n\tdt.evict()\n}\n\n// If we're too big, evict old stuff (front of the slice)\nfunc (dt *dynamicTable) evict() {\n\tbase := dt.ents // keep base pointer of slice\n\tfor dt.size > dt.maxSize {\n\t\tdt.size -= dt.ents[0].size()\n\t\tdt.ents = dt.ents[1:]\n\t}\n\n\t// Shift slice contents down if we evicted things.\n\tif len(dt.ents) != len(base) {\n\t\tcopy(base, dt.ents)\n\t\tdt.ents = base[:len(dt.ents)]\n\t}\n}\n\n// constantTimeStringCompare compares string a and b in a constant\n// time manner.\nfunc constantTimeStringCompare(a, b string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\n\tc := byte(0)\n\n\tfor i := 0; i < len(a); i++ {\n\t\tc |= a[i] ^ b[i]\n\t}\n\n\treturn c == 0\n}\n\n// Search searches f in the table. The return value i is 0 if there is\n// no name match. If there is name match or name/value match, i is the\n// index of that entry (1-based). 
If both name and value match,\n// nameValueMatch becomes true.\nfunc (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {\n\tl := len(dt.ents)\n\tfor j := l - 1; j >= 0; j-- {\n\t\tent := dt.ents[j]\n\t\tif !constantTimeStringCompare(ent.Name, f.Name) {\n\t\t\tcontinue\n\t\t}\n\t\tif i == 0 {\n\t\t\ti = uint64(l - j)\n\t\t}\n\t\tif f.Sensitive {\n\t\t\tcontinue\n\t\t}\n\t\tif !constantTimeStringCompare(ent.Value, f.Value) {\n\t\t\tcontinue\n\t\t}\n\t\ti = uint64(l - j)\n\t\tnameValueMatch = true\n\t\treturn\n\t}\n\treturn\n}\n\nfunc (d *Decoder) maxTableIndex() int {\n\treturn len(d.dynTab.ents) + len(staticTable)\n}\n\nfunc (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {\n\tif i < 1 {\n\t\treturn\n\t}\n\tif i > uint64(d.maxTableIndex()) {\n\t\treturn\n\t}\n\tif i <= uint64(len(staticTable)) {\n\t\treturn staticTable[i-1], true\n\t}\n\tdents := d.dynTab.ents\n\treturn dents[len(dents)-(int(i)-len(staticTable))], true\n}\n\n// Decode decodes an entire block.\n//\n// TODO: remove this method and make it incremental later? This is\n// easier for debugging now.\nfunc (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {\n\tvar hf []HeaderField\n\tsaveFunc := d.emit\n\tdefer func() { d.emit = saveFunc }()\n\td.emit = func(f HeaderField) { hf = append(hf, f) }\n\tif _, err := d.Write(p); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := d.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn hf, nil\n}\n\nfunc (d *Decoder) Close() error {\n\tif d.saveBuf.Len() > 0 {\n\t\td.saveBuf.Reset()\n\t\treturn DecodingError{errors.New(\"truncated headers\")}\n\t}\n\treturn nil\n}\n\nfunc (d *Decoder) Write(p []byte) (n int, err error) {\n\tif len(p) == 0 {\n\t\t// Prevent state machine CPU attacks (making us redo\n\t\t// work up to the point of finding out we don't have\n\t\t// enough data)\n\t\treturn\n\t}\n\t// Only copy the data if we have to. 
Optimistically assume\n\t// that p will contain a complete header block.\n\tif d.saveBuf.Len() == 0 {\n\t\td.buf = p\n\t} else {\n\t\td.saveBuf.Write(p)\n\t\td.buf = d.saveBuf.Bytes()\n\t\td.saveBuf.Reset()\n\t}\n\n\tfor len(d.buf) > 0 {\n\t\terr = d.parseHeaderFieldRepr()\n\t\tif err != nil {\n\t\t\tif err == errNeedMore {\n\t\t\t\terr = nil\n\t\t\t\td.saveBuf.Write(d.buf)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn len(p), err\n}\n\n// errNeedMore is an internal sentinel error value that means the\n// buffer is truncated and we need to read more data before we can\n// continue parsing.\nvar errNeedMore = errors.New(\"need more data\")\n\ntype indexType int\n\nconst (\n\tindexedTrue indexType = iota\n\tindexedFalse\n\tindexedNever\n)\n\nfunc (v indexType) indexed() bool   { return v == indexedTrue }\nfunc (v indexType) sensitive() bool { return v == indexedNever }\n\n// returns errNeedMore if there isn't enough data available.\n// any other error is fatal.\n// consumes d.buf iff it returns nil.\n// precondition: must be called with len(d.buf) > 0\nfunc (d *Decoder) parseHeaderFieldRepr() error {\n\tb := d.buf[0]\n\tswitch {\n\tcase b&128 != 0:\n\t\t// Indexed representation.\n\t\t// High bit set?\n\t\t// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1\n\t\treturn d.parseFieldIndexed()\n\tcase b&192 == 64:\n\t\t// 6.2.1 Literal Header Field with Incremental Indexing\n\t\t// 0b10xxxxxx: top two bits are 10\n\t\t// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1\n\t\treturn d.parseFieldLiteral(6, indexedTrue)\n\tcase b&240 == 0:\n\t\t// 6.2.2 Literal Header Field without Indexing\n\t\t// 0b0000xxxx: top four bits are 0000\n\t\t// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2\n\t\treturn d.parseFieldLiteral(4, indexedFalse)\n\tcase b&240 == 16:\n\t\t// 6.2.3 Literal Header Field never Indexed\n\t\t// 0b0001xxxx: top four bits are 0001\n\t\t// 
http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3\n\t\treturn d.parseFieldLiteral(4, indexedNever)\n\tcase b&224 == 32:\n\t\t// 6.3 Dynamic Table Size Update\n\t\t// Top three bits are '001'.\n\t\t// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3\n\t\treturn d.parseDynamicTableSizeUpdate()\n\t}\n\n\treturn DecodingError{errors.New(\"invalid encoding\")}\n}\n\n// (same invariants and behavior as parseHeaderFieldRepr)\nfunc (d *Decoder) parseFieldIndexed() error {\n\tbuf := d.buf\n\tidx, buf, err := readVarInt(7, buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\thf, ok := d.at(idx)\n\tif !ok {\n\t\treturn DecodingError{InvalidIndexError(idx)}\n\t}\n\td.emit(HeaderField{Name: hf.Name, Value: hf.Value})\n\td.buf = buf\n\treturn nil\n}\n\n// (same invariants and behavior as parseHeaderFieldRepr)\nfunc (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {\n\tbuf := d.buf\n\tnameIdx, buf, err := readVarInt(n, buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar hf HeaderField\n\tif nameIdx > 0 {\n\t\tihf, ok := d.at(nameIdx)\n\t\tif !ok {\n\t\t\treturn DecodingError{InvalidIndexError(nameIdx)}\n\t\t}\n\t\thf.Name = ihf.Name\n\t} else {\n\t\thf.Name, buf, err = readString(buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\thf.Value, buf, err = readString(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.buf = buf\n\tif it.indexed() {\n\t\td.dynTab.add(hf)\n\t}\n\thf.Sensitive = it.sensitive()\n\td.emit(hf)\n\treturn nil\n}\n\n// (same invariants and behavior as parseHeaderFieldRepr)\nfunc (d *Decoder) parseDynamicTableSizeUpdate() error {\n\tbuf := d.buf\n\tsize, buf, err := readVarInt(5, buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif size > uint64(d.dynTab.allowedMaxSize) {\n\t\treturn DecodingError{errors.New(\"dynamic table size update too large\")}\n\t}\n\td.dynTab.setMaxSize(uint32(size))\n\td.buf = buf\n\treturn nil\n}\n\nvar errVarintOverflow = DecodingError{errors.New(\"varint integer overflow\")}\n\n// 
readVarInt reads an unsigned variable length integer off the\n// beginning of p. n is the parameter as described in\n// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.\n//\n// n must always be between 1 and 8.\n//\n// The returned remain buffer is either a smaller suffix of p, or err != nil.\n// The error is errNeedMore if p doesn't contain a complete integer.\nfunc readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {\n\tif n < 1 || n > 8 {\n\t\tpanic(\"bad n\")\n\t}\n\tif len(p) == 0 {\n\t\treturn 0, p, errNeedMore\n\t}\n\ti = uint64(p[0])\n\tif n < 8 {\n\t\ti &= (1 << uint64(n)) - 1\n\t}\n\tif i < (1<<uint64(n))-1 {\n\t\treturn i, p[1:], nil\n\t}\n\n\torigP := p\n\tp = p[1:]\n\tvar m uint64\n\tfor len(p) > 0 {\n\t\tb := p[0]\n\t\tp = p[1:]\n\t\ti += uint64(b&127) << m\n\t\tif b&128 == 0 {\n\t\t\treturn i, p, nil\n\t\t}\n\t\tm += 7\n\t\tif m >= 63 { // TODO: proper overflow check. making this up.\n\t\t\treturn 0, origP, errVarintOverflow\n\t\t}\n\t}\n\treturn 0, origP, errNeedMore\n}\n\nfunc readString(p []byte) (s string, remain []byte, err error) {\n\tif len(p) == 0 {\n\t\treturn \"\", p, errNeedMore\n\t}\n\tisHuff := p[0]&128 != 0\n\tstrLen, p, err := readVarInt(7, p)\n\tif err != nil {\n\t\treturn \"\", p, err\n\t}\n\tif uint64(len(p)) < strLen {\n\t\treturn \"\", p, errNeedMore\n\t}\n\tif !isHuff {\n\t\treturn string(p[:strLen]), p[strLen:], nil\n\t}\n\n\t// TODO: optimize this garbage:\n\tvar buf bytes.Buffer\n\tif _, err := HuffmanDecode(&buf, p[:strLen]); err != nil {\n\t\treturn \"\", nil, err\n\t}\n\treturn buf.String(), p[strLen:], nil\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/hpack/huffman.go",
    "content": "// Copyright 2014 The Go Authors.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\npackage hpack\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"sync\"\n)\n\nvar bufPool = sync.Pool{\n\tNew: func() interface{} { return new(bytes.Buffer) },\n}\n\n// HuffmanDecode decodes the string in v and writes the expanded\n// result to w, returning the number of bytes written to w and the\n// Write call's return value. At most one Write call is made.\nfunc HuffmanDecode(w io.Writer, v []byte) (int, error) {\n\tbuf := bufPool.Get().(*bytes.Buffer)\n\tbuf.Reset()\n\tdefer bufPool.Put(buf)\n\n\tn := rootHuffmanNode\n\tcur, nbits := uint(0), uint8(0)\n\tfor _, b := range v {\n\t\tcur = cur<<8 | uint(b)\n\t\tnbits += 8\n\t\tfor nbits >= 8 {\n\t\t\tn = n.children[byte(cur>>(nbits-8))]\n\t\t\tif n.children == nil {\n\t\t\t\tbuf.WriteByte(n.sym)\n\t\t\t\tnbits -= n.codeLen\n\t\t\t\tn = rootHuffmanNode\n\t\t\t} else {\n\t\t\t\tnbits -= 8\n\t\t\t}\n\t\t}\n\t}\n\tfor nbits > 0 {\n\t\tn = n.children[byte(cur<<(8-nbits))]\n\t\tif n.children != nil || n.codeLen > nbits {\n\t\t\tbreak\n\t\t}\n\t\tbuf.WriteByte(n.sym)\n\t\tnbits -= n.codeLen\n\t\tn = rootHuffmanNode\n\t}\n\treturn w.Write(buf.Bytes())\n}\n\ntype node struct {\n\t// children is non-nil for internal nodes\n\tchildren []*node\n\n\t// The following are only valid if children is nil:\n\tcodeLen uint8 // number of bits that led to the output of sym\n\tsym     byte  // output symbol\n}\n\nfunc newInternalNode() *node {\n\treturn &node{children: make([]*node, 256)}\n}\n\nvar rootHuffmanNode = newInternalNode()\n\nfunc init() {\n\tfor i, code := range huffmanCodes {\n\t\tif i > 255 {\n\t\t\tpanic(\"too many huffman codes\")\n\t\t}\n\t\taddDecoderNode(byte(i), code, huffmanCodeLen[i])\n\t}\n}\n\nfunc addDecoderNode(sym byte, code uint32, codeLen uint8) {\n\tcur := rootHuffmanNode\n\tfor codeLen > 8 
{\n\t\tcodeLen -= 8\n\t\ti := uint8(code >> codeLen)\n\t\tif cur.children[i] == nil {\n\t\t\tcur.children[i] = newInternalNode()\n\t\t}\n\t\tcur = cur.children[i]\n\t}\n\tshift := 8 - codeLen\n\tstart, end := int(uint8(code<<shift)), int(1<<shift)\n\tfor i := start; i < start+end; i++ {\n\t\tcur.children[i] = &node{sym: sym, codeLen: codeLen}\n\t}\n}\n\n// AppendHuffmanString appends s, as encoded in Huffman codes, to dst\n// and returns the extended buffer.\nfunc AppendHuffmanString(dst []byte, s string) []byte {\n\trembits := uint8(8)\n\n\tfor i := 0; i < len(s); i++ {\n\t\tif rembits == 8 {\n\t\t\tdst = append(dst, 0)\n\t\t}\n\t\tdst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])\n\t}\n\n\tif rembits < 8 {\n\t\t// special EOS symbol\n\t\tcode := uint32(0x3fffffff)\n\t\tnbits := uint8(30)\n\n\t\tt := uint8(code >> (nbits - rembits))\n\t\tdst[len(dst)-1] |= t\n\t}\n\n\treturn dst\n}\n\n// HuffmanEncodeLength returns the number of bytes required to encode\n// s in Huffman codes. The result is round up to byte boundary.\nfunc HuffmanEncodeLength(s string) uint64 {\n\tn := uint64(0)\n\tfor i := 0; i < len(s); i++ {\n\t\tn += uint64(huffmanCodeLen[s[i]])\n\t}\n\treturn (n + 7) / 8\n}\n\n// appendByteToHuffmanCode appends Huffman code for c to dst and\n// returns the extended buffer and the remaining bits in the last\n// element. The appending is not byte aligned and the remaining bits\n// in the last element of dst is given in rembits.\nfunc appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {\n\tcode := huffmanCodes[c]\n\tnbits := huffmanCodeLen[c]\n\n\tfor {\n\t\tif rembits > nbits {\n\t\t\tt := uint8(code << (rembits - nbits))\n\t\t\tdst[len(dst)-1] |= t\n\t\t\trembits -= nbits\n\t\t\tbreak\n\t\t}\n\n\t\tt := uint8(code >> (nbits - rembits))\n\t\tdst[len(dst)-1] |= t\n\n\t\tnbits -= rembits\n\t\trembits = 8\n\n\t\tif nbits == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tdst = append(dst, 0)\n\t}\n\n\treturn dst, rembits\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/hpack/tables.go",
    "content": "// Copyright 2014 The Go Authors.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\npackage hpack\n\nfunc pair(name, value string) HeaderField {\n\treturn HeaderField{Name: name, Value: value}\n}\n\n// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B\nvar staticTable = []HeaderField{\n\tpair(\":authority\", \"\"), // index 1 (1-based)\n\tpair(\":method\", \"GET\"),\n\tpair(\":method\", \"POST\"),\n\tpair(\":path\", \"/\"),\n\tpair(\":path\", \"/index.html\"),\n\tpair(\":scheme\", \"http\"),\n\tpair(\":scheme\", \"https\"),\n\tpair(\":status\", \"200\"),\n\tpair(\":status\", \"204\"),\n\tpair(\":status\", \"206\"),\n\tpair(\":status\", \"304\"),\n\tpair(\":status\", \"400\"),\n\tpair(\":status\", \"404\"),\n\tpair(\":status\", \"500\"),\n\tpair(\"accept-charset\", \"\"),\n\tpair(\"accept-encoding\", \"gzip, deflate\"),\n\tpair(\"accept-language\", \"\"),\n\tpair(\"accept-ranges\", \"\"),\n\tpair(\"accept\", \"\"),\n\tpair(\"access-control-allow-origin\", \"\"),\n\tpair(\"age\", \"\"),\n\tpair(\"allow\", \"\"),\n\tpair(\"authorization\", \"\"),\n\tpair(\"cache-control\", \"\"),\n\tpair(\"content-disposition\", \"\"),\n\tpair(\"content-encoding\", \"\"),\n\tpair(\"content-language\", \"\"),\n\tpair(\"content-length\", \"\"),\n\tpair(\"content-location\", \"\"),\n\tpair(\"content-range\", \"\"),\n\tpair(\"content-type\", \"\"),\n\tpair(\"cookie\", \"\"),\n\tpair(\"date\", \"\"),\n\tpair(\"etag\", \"\"),\n\tpair(\"expect\", \"\"),\n\tpair(\"expires\", \"\"),\n\tpair(\"from\", \"\"),\n\tpair(\"host\", \"\"),\n\tpair(\"if-match\", \"\"),\n\tpair(\"if-modified-since\", \"\"),\n\tpair(\"if-none-match\", \"\"),\n\tpair(\"if-range\", \"\"),\n\tpair(\"if-unmodified-since\", \"\"),\n\tpair(\"last-modified\", \"\"),\n\tpair(\"link\", \"\"),\n\tpair(\"location\", \"\"),\n\tpair(\"max-forwards\", 
\"\"),\n\tpair(\"proxy-authenticate\", \"\"),\n\tpair(\"proxy-authorization\", \"\"),\n\tpair(\"range\", \"\"),\n\tpair(\"referer\", \"\"),\n\tpair(\"refresh\", \"\"),\n\tpair(\"retry-after\", \"\"),\n\tpair(\"server\", \"\"),\n\tpair(\"set-cookie\", \"\"),\n\tpair(\"strict-transport-security\", \"\"),\n\tpair(\"transfer-encoding\", \"\"),\n\tpair(\"user-agent\", \"\"),\n\tpair(\"vary\", \"\"),\n\tpair(\"via\", \"\"),\n\tpair(\"www-authenticate\", \"\"),\n}\n\nvar huffmanCodes = []uint32{\n\t0x1ff8,\n\t0x7fffd8,\n\t0xfffffe2,\n\t0xfffffe3,\n\t0xfffffe4,\n\t0xfffffe5,\n\t0xfffffe6,\n\t0xfffffe7,\n\t0xfffffe8,\n\t0xffffea,\n\t0x3ffffffc,\n\t0xfffffe9,\n\t0xfffffea,\n\t0x3ffffffd,\n\t0xfffffeb,\n\t0xfffffec,\n\t0xfffffed,\n\t0xfffffee,\n\t0xfffffef,\n\t0xffffff0,\n\t0xffffff1,\n\t0xffffff2,\n\t0x3ffffffe,\n\t0xffffff3,\n\t0xffffff4,\n\t0xffffff5,\n\t0xffffff6,\n\t0xffffff7,\n\t0xffffff8,\n\t0xffffff9,\n\t0xffffffa,\n\t0xffffffb,\n\t0x14,\n\t0x3f8,\n\t0x3f9,\n\t0xffa,\n\t0x1ff9,\n\t0x15,\n\t0xf8,\n\t0x7fa,\n\t0x3fa,\n\t0x3fb,\n\t0xf9,\n\t0x7fb,\n\t0xfa,\n\t0x16,\n\t0x17,\n\t0x18,\n\t0x0,\n\t0x1,\n\t0x2,\n\t0x19,\n\t0x1a,\n\t0x1b,\n\t0x1c,\n\t0x1d,\n\t0x1e,\n\t0x1f,\n\t0x5c,\n\t0xfb,\n\t0x7ffc,\n\t0x20,\n\t0xffb,\n\t0x3fc,\n\t0x1ffa,\n\t0x21,\n\t0x5d,\n\t0x5e,\n\t0x5f,\n\t0x60,\n\t0x61,\n\t0x62,\n\t0x63,\n\t0x64,\n\t0x65,\n\t0x66,\n\t0x67,\n\t0x68,\n\t0x69,\n\t0x6a,\n\t0x6b,\n\t0x6c,\n\t0x6d,\n\t0x6e,\n\t0x6f,\n\t0x70,\n\t0x71,\n\t0x72,\n\t0xfc,\n\t0x73,\n\t0xfd,\n\t0x1ffb,\n\t0x7fff0,\n\t0x1ffc,\n\t0x3ffc,\n\t0x22,\n\t0x7ffd,\n\t0x3,\n\t0x23,\n\t0x4,\n\t0x24,\n\t0x5,\n\t0x25,\n\t0x26,\n\t0x27,\n\t0x6,\n\t0x74,\n\t0x75,\n\t0x28,\n\t0x29,\n\t0x2a,\n\t0x7,\n\t0x2b,\n\t0x76,\n\t0x2c,\n\t0x8,\n\t0x9,\n\t0x2d,\n\t0x77,\n\t0x78,\n\t0x79,\n\t0x7a,\n\t0x7b,\n\t0x7ffe,\n\t0x7fc,\n\t0x3ffd,\n\t0x1ffd,\n\t0xffffffc,\n\t0xfffe6,\n\t0x3fffd2,\n\t0xfffe7,\n\t0xfffe8,\n\t0x3fffd3,\n\t0x3fffd4,\n\t0x3fffd5,\n\t0x7fffd9,\n\t0x3fffd6,\n\t0x7fffda,\n\t0x7fffdb,\n\t0x7fffdc,\n\t0x7fffdd,\n\
t0x7fffde,\n\t0xffffeb,\n\t0x7fffdf,\n\t0xffffec,\n\t0xffffed,\n\t0x3fffd7,\n\t0x7fffe0,\n\t0xffffee,\n\t0x7fffe1,\n\t0x7fffe2,\n\t0x7fffe3,\n\t0x7fffe4,\n\t0x1fffdc,\n\t0x3fffd8,\n\t0x7fffe5,\n\t0x3fffd9,\n\t0x7fffe6,\n\t0x7fffe7,\n\t0xffffef,\n\t0x3fffda,\n\t0x1fffdd,\n\t0xfffe9,\n\t0x3fffdb,\n\t0x3fffdc,\n\t0x7fffe8,\n\t0x7fffe9,\n\t0x1fffde,\n\t0x7fffea,\n\t0x3fffdd,\n\t0x3fffde,\n\t0xfffff0,\n\t0x1fffdf,\n\t0x3fffdf,\n\t0x7fffeb,\n\t0x7fffec,\n\t0x1fffe0,\n\t0x1fffe1,\n\t0x3fffe0,\n\t0x1fffe2,\n\t0x7fffed,\n\t0x3fffe1,\n\t0x7fffee,\n\t0x7fffef,\n\t0xfffea,\n\t0x3fffe2,\n\t0x3fffe3,\n\t0x3fffe4,\n\t0x7ffff0,\n\t0x3fffe5,\n\t0x3fffe6,\n\t0x7ffff1,\n\t0x3ffffe0,\n\t0x3ffffe1,\n\t0xfffeb,\n\t0x7fff1,\n\t0x3fffe7,\n\t0x7ffff2,\n\t0x3fffe8,\n\t0x1ffffec,\n\t0x3ffffe2,\n\t0x3ffffe3,\n\t0x3ffffe4,\n\t0x7ffffde,\n\t0x7ffffdf,\n\t0x3ffffe5,\n\t0xfffff1,\n\t0x1ffffed,\n\t0x7fff2,\n\t0x1fffe3,\n\t0x3ffffe6,\n\t0x7ffffe0,\n\t0x7ffffe1,\n\t0x3ffffe7,\n\t0x7ffffe2,\n\t0xfffff2,\n\t0x1fffe4,\n\t0x1fffe5,\n\t0x3ffffe8,\n\t0x3ffffe9,\n\t0xffffffd,\n\t0x7ffffe3,\n\t0x7ffffe4,\n\t0x7ffffe5,\n\t0xfffec,\n\t0xfffff3,\n\t0xfffed,\n\t0x1fffe6,\n\t0x3fffe9,\n\t0x1fffe7,\n\t0x1fffe8,\n\t0x7ffff3,\n\t0x3fffea,\n\t0x3fffeb,\n\t0x1ffffee,\n\t0x1ffffef,\n\t0xfffff4,\n\t0xfffff5,\n\t0x3ffffea,\n\t0x7ffff4,\n\t0x3ffffeb,\n\t0x7ffffe6,\n\t0x3ffffec,\n\t0x3ffffed,\n\t0x7ffffe7,\n\t0x7ffffe8,\n\t0x7ffffe9,\n\t0x7ffffea,\n\t0x7ffffeb,\n\t0xffffffe,\n\t0x7ffffec,\n\t0x7ffffed,\n\t0x7ffffee,\n\t0x7ffffef,\n\t0x7fffff0,\n\t0x3ffffee,\n}\n\nvar huffmanCodeLen = []uint8{\n\t13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,\n\t28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,\n\t6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,\n\t5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,\n\t13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,\n\t7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,\n\t15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,\n\t6, 7, 6, 5, 5, 6, 7, 7, 7, 
7, 7, 15, 11, 14, 13, 28,\n\t20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,\n\t24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,\n\t22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,\n\t21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,\n\t26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,\n\t19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,\n\t20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,\n\t26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/http2.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\n// Package http2 implements the HTTP/2 protocol.\n//\n// This is a work in progress. This package is low-level and intended\n// to be used directly by very few people. Most users will use it\n// indirectly through integration with the net/http package. See\n// ConfigureServer. That ConfigureServer call will likely be automatic\n// or available via an empty import in the future.\n//\n// See http://http2.github.io/\npackage http2\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"sync\"\n)\n\nvar VerboseLogs = false\n\nconst (\n\t// ClientPreface is the string that must be sent by new\n\t// connections from clients.\n\tClientPreface = \"PRI * HTTP/2.0\\r\\n\\r\\nSM\\r\\n\\r\\n\"\n\n\t// SETTINGS_MAX_FRAME_SIZE default\n\t// http://http2.github.io/http2-spec/#rfc.section.6.5.2\n\tinitialMaxFrameSize = 16384\n\n\t// NextProtoTLS is the NPN/ALPN protocol negotiated during\n\t// HTTP/2's TLS setup.\n\tNextProtoTLS = \"h2\"\n\n\t// http://http2.github.io/http2-spec/#SettingValues\n\tinitialHeaderTableSize = 4096\n\n\tinitialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size\n\n\tdefaultMaxReadFrameSize = 1 << 20\n)\n\nvar (\n\tclientPreface = []byte(ClientPreface)\n)\n\ntype streamState int\n\nconst (\n\tstateIdle streamState = iota\n\tstateOpen\n\tstateHalfClosedLocal\n\tstateHalfClosedRemote\n\tstateResvLocal\n\tstateResvRemote\n\tstateClosed\n)\n\nvar stateName = [...]string{\n\tstateIdle:             \"Idle\",\n\tstateOpen:             \"Open\",\n\tstateHalfClosedLocal:  \"HalfClosedLocal\",\n\tstateHalfClosedRemote: \"HalfClosedRemote\",\n\tstateResvLocal:        
\"ResvLocal\",\n\tstateResvRemote:       \"ResvRemote\",\n\tstateClosed:           \"Closed\",\n}\n\nfunc (st streamState) String() string {\n\treturn stateName[st]\n}\n\n// Setting is a setting parameter: which setting it is, and its value.\ntype Setting struct {\n\t// ID is which setting is being set.\n\t// See http://http2.github.io/http2-spec/#SettingValues\n\tID SettingID\n\n\t// Val is the value.\n\tVal uint32\n}\n\nfunc (s Setting) String() string {\n\treturn fmt.Sprintf(\"[%v = %d]\", s.ID, s.Val)\n}\n\n// Valid reports whether the setting is valid.\nfunc (s Setting) Valid() error {\n\t// Limits and error codes from 6.5.2 Defined SETTINGS Parameters\n\tswitch s.ID {\n\tcase SettingEnablePush:\n\t\tif s.Val != 1 && s.Val != 0 {\n\t\t\treturn ConnectionError(ErrCodeProtocol)\n\t\t}\n\tcase SettingInitialWindowSize:\n\t\tif s.Val > 1<<31-1 {\n\t\t\treturn ConnectionError(ErrCodeFlowControl)\n\t\t}\n\tcase SettingMaxFrameSize:\n\t\tif s.Val < 16384 || s.Val > 1<<24-1 {\n\t\t\treturn ConnectionError(ErrCodeProtocol)\n\t\t}\n\t}\n\treturn nil\n}\n\n// A SettingID is an HTTP/2 setting as defined in\n// http://http2.github.io/http2-spec/#iana-settings\ntype SettingID uint16\n\nconst (\n\tSettingHeaderTableSize      SettingID = 0x1\n\tSettingEnablePush           SettingID = 0x2\n\tSettingMaxConcurrentStreams SettingID = 0x3\n\tSettingInitialWindowSize    SettingID = 0x4\n\tSettingMaxFrameSize         SettingID = 0x5\n\tSettingMaxHeaderListSize    SettingID = 0x6\n)\n\nvar settingName = map[SettingID]string{\n\tSettingHeaderTableSize:      \"HEADER_TABLE_SIZE\",\n\tSettingEnablePush:           \"ENABLE_PUSH\",\n\tSettingMaxConcurrentStreams: \"MAX_CONCURRENT_STREAMS\",\n\tSettingInitialWindowSize:    \"INITIAL_WINDOW_SIZE\",\n\tSettingMaxFrameSize:         \"MAX_FRAME_SIZE\",\n\tSettingMaxHeaderListSize:    \"MAX_HEADER_LIST_SIZE\",\n}\n\nfunc (s SettingID) String() string {\n\tif v, ok := settingName[s]; ok {\n\t\treturn v\n\t}\n\treturn 
fmt.Sprintf(\"UNKNOWN_SETTING_%d\", uint16(s))\n}\n\nfunc validHeader(v string) bool {\n\tif len(v) == 0 {\n\t\treturn false\n\t}\n\tfor _, r := range v {\n\t\t// \"Just as in HTTP/1.x, header field names are\n\t\t// strings of ASCII characters that are compared in a\n\t\t// case-insensitive fashion. However, header field\n\t\t// names MUST be converted to lowercase prior to their\n\t\t// encoding in HTTP/2. \"\n\t\tif r >= 127 || ('A' <= r && r <= 'Z') {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nvar httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)\n\nfunc init() {\n\tfor i := 100; i <= 999; i++ {\n\t\tif v := http.StatusText(i); v != \"\" {\n\t\t\thttpCodeStringCommon[i] = strconv.Itoa(i)\n\t\t}\n\t}\n}\n\nfunc httpCodeString(code int) string {\n\tif s, ok := httpCodeStringCommon[code]; ok {\n\t\treturn s\n\t}\n\treturn strconv.Itoa(code)\n}\n\n// from pkg io\ntype stringWriter interface {\n\tWriteString(s string) (n int, err error)\n}\n\n// A gate lets two goroutines coordinate their activities.\ntype gate chan struct{}\n\nfunc (g gate) Done() { g <- struct{}{} }\nfunc (g gate) Wait() { <-g }\n\n// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).\ntype closeWaiter chan struct{}\n\n// Init makes a closeWaiter usable.\n// It exists because so a closeWaiter value can be placed inside a\n// larger struct and have the Mutex and Cond's memory in the same\n// allocation.\nfunc (cw *closeWaiter) Init() {\n\t*cw = make(chan struct{})\n}\n\n// Close marks the closeWaiter as closed and unblocks any waiters.\nfunc (cw closeWaiter) Close() {\n\tclose(cw)\n}\n\n// Wait waits for the closeWaiter to become closed.\nfunc (cw closeWaiter) Wait() {\n\t<-cw\n}\n\n// bufferedWriter is a buffered writer that writes to w.\n// Its buffered writer is lazily allocated as needed, to minimize\n// idle memory usage with many connections.\ntype bufferedWriter struct {\n\tw  io.Writer     // immutable\n\tbw *bufio.Writer // non-nil when 
data is buffered\n}\n\nfunc newBufferedWriter(w io.Writer) *bufferedWriter {\n\treturn &bufferedWriter{w: w}\n}\n\nvar bufWriterPool = sync.Pool{\n\tNew: func() interface{} {\n\t\t// TODO: pick something better? this is a bit under\n\t\t// (3 x typical 1500 byte MTU) at least.\n\t\treturn bufio.NewWriterSize(nil, 4<<10)\n\t},\n}\n\nfunc (w *bufferedWriter) Write(p []byte) (n int, err error) {\n\tif w.bw == nil {\n\t\tbw := bufWriterPool.Get().(*bufio.Writer)\n\t\tbw.Reset(w.w)\n\t\tw.bw = bw\n\t}\n\treturn w.bw.Write(p)\n}\n\nfunc (w *bufferedWriter) Flush() error {\n\tbw := w.bw\n\tif bw == nil {\n\t\treturn nil\n\t}\n\terr := bw.Flush()\n\tbw.Reset(nil)\n\tbufWriterPool.Put(bw)\n\tw.bw = nil\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/pipe.go",
    "content": "// Copyright 2014 The Go Authors.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\npackage http2\n\nimport (\n\t\"sync\"\n)\n\ntype pipe struct {\n\tb buffer\n\tc sync.Cond\n\tm sync.Mutex\n}\n\n// Read waits until data is available and copies bytes\n// from the buffer into p.\nfunc (r *pipe) Read(p []byte) (n int, err error) {\n\tr.c.L.Lock()\n\tdefer r.c.L.Unlock()\n\tfor r.b.Len() == 0 && !r.b.closed {\n\t\tr.c.Wait()\n\t}\n\treturn r.b.Read(p)\n}\n\n// Write copies bytes from p into the buffer and wakes a reader.\n// It is an error to write more data than the buffer can hold.\nfunc (w *pipe) Write(p []byte) (n int, err error) {\n\tw.c.L.Lock()\n\tdefer w.c.L.Unlock()\n\tdefer w.c.Signal()\n\treturn w.b.Write(p)\n}\n\nfunc (c *pipe) Close(err error) {\n\tc.c.L.Lock()\n\tdefer c.c.L.Unlock()\n\tdefer c.c.Signal()\n\tc.b.Close(err)\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/server.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\n// TODO: replace all <-sc.doneServing with reads from the stream's cw\n// instead, and make sure that on close we close all open\n// streams. then remove doneServing?\n\n// TODO: finish GOAWAY support. Consider each incoming frame type and\n// whether it should be ignored during a shutdown race.\n\n// TODO: disconnect idle clients. GFE seems to do 4 minutes. make\n// configurable?  or maximum number of idle clients and remove the\n// oldest?\n\n// TODO: turn off the serve goroutine when idle, so\n// an idle conn only has the readFrames goroutine active. (which could\n// also be optimized probably to pin less memory in crypto/tls). This\n// would involve tracking when the serve goroutine is active (atomic\n// int32 read/CAS probably?) and starting it up when frames arrive,\n// and shutting it down when all handlers exit. the occasional PING\n// packets could use time.AfterFunc to call sc.wakeStartServeLoop()\n// (which is a no-op if already running) and then queue the PING write\n// as normal. The serve loop would then exit in most cases (if no\n// Handlers running) and not be woken up again until the PING packet\n// returns.\n\n// TODO (maybe): add a mechanism for Handlers to going into\n// half-closed-local mode (rw.(io.Closer) test?) but not exit their\n// handler, and continue to be able to read from the\n// Request.Body. This would be a somewhat semantic change from HTTP/1\n// (or at least what we expose in net/http), so I'd probably want to\n// add it there too. 
For now, this package says that returning from\n// the Handler ServeHTTP function means you're both done reading and\n// done writing, without a way to stop just one or the other.\n\npackage http2\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/bradfitz/http2/hpack\"\n)\n\nconst (\n\tprefaceTimeout        = 10 * time.Second\n\tfirstSettingsTimeout  = 2 * time.Second // should be in-flight with preface anyway\n\thandlerChunkWriteSize = 4 << 10\n\tdefaultMaxStreams     = 250 // TODO: make this 100 as the GFE seems to?\n)\n\nvar (\n\terrClientDisconnected = errors.New(\"client disconnected\")\n\terrClosedBody         = errors.New(\"body closed by handler\")\n\terrStreamBroken       = errors.New(\"http2: stream broken\")\n)\n\nvar responseWriterStatePool = sync.Pool{\n\tNew: func() interface{} {\n\t\trws := &responseWriterState{}\n\t\trws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)\n\t\treturn rws\n\t},\n}\n\n// Test hooks.\nvar (\n\ttestHookOnConn        func()\n\ttestHookGetServerConn func(*serverConn)\n\ttestHookOnPanicMu     *sync.Mutex // nil except in tests\n\ttestHookOnPanic       func(sc *serverConn, panicVal interface{}) (rePanic bool)\n)\n\n// Server is an HTTP/2 server.\ntype Server struct {\n\t// MaxHandlers limits the number of http.Handler ServeHTTP goroutines\n\t// which may run at a time over all connections.\n\t// Negative or zero no limit.\n\t// TODO: implement\n\tMaxHandlers int\n\n\t// MaxConcurrentStreams optionally specifies the number of\n\t// concurrent streams that each client may have open at a\n\t// time. 
This is unrelated to the number of http.Handler goroutines\n\t// which may be active globally, which is MaxHandlers.\n\t// If zero, MaxConcurrentStreams defaults to at least 100, per\n\t// the HTTP/2 spec's recommendations.\n\tMaxConcurrentStreams uint32\n\n\t// MaxReadFrameSize optionally specifies the largest frame\n\t// this server is willing to read. A valid value is between\n\t// 16k and 16M, inclusive. If zero or otherwise invalid, a\n\t// default value is used.\n\tMaxReadFrameSize uint32\n\n\t// PermitProhibitedCipherSuites, if true, permits the use of\n\t// cipher suites prohibited by the HTTP/2 spec.\n\tPermitProhibitedCipherSuites bool\n}\n\nfunc (s *Server) maxReadFrameSize() uint32 {\n\tif v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {\n\t\treturn v\n\t}\n\treturn defaultMaxReadFrameSize\n}\n\nfunc (s *Server) maxConcurrentStreams() uint32 {\n\tif v := s.MaxConcurrentStreams; v > 0 {\n\t\treturn v\n\t}\n\treturn defaultMaxStreams\n}\n\n// ConfigureServer adds HTTP/2 support to a net/http Server.\n//\n// The configuration conf may be nil.\n//\n// ConfigureServer must be called before s begins serving.\nfunc ConfigureServer(s *http.Server, conf *Server) {\n\tif conf == nil {\n\t\tconf = new(Server)\n\t}\n\tif s.TLSConfig == nil {\n\t\ts.TLSConfig = new(tls.Config)\n\t}\n\n\t// Note: not setting MinVersion to tls.VersionTLS12,\n\t// as we don't want to interfere with HTTP/1.1 traffic\n\t// on the user's server. We enforce TLS 1.2 later once\n\t// we accept a connection. 
Ideally this should be done\n\t// during next-proto selection, but using TLS <1.2 with\n\t// HTTP/2 is still the client's bug.\n\n\t// Be sure we advertise tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n\t// at least.\n\t// TODO: enable PreferServerCipherSuites?\n\tif s.TLSConfig.CipherSuites != nil {\n\t\tconst requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n\t\thaveRequired := false\n\t\tfor _, v := range s.TLSConfig.CipherSuites {\n\t\t\tif v == requiredCipher {\n\t\t\t\thaveRequired = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !haveRequired {\n\t\t\ts.TLSConfig.CipherSuites = append(s.TLSConfig.CipherSuites, requiredCipher)\n\t\t}\n\t}\n\n\thaveNPN := false\n\tfor _, p := range s.TLSConfig.NextProtos {\n\t\tif p == NextProtoTLS {\n\t\t\thaveNPN = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !haveNPN {\n\t\ts.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)\n\t}\n\t// h2-14 is temporary (as of 2015-03-05) while we wait for all browsers\n\t// to switch to \"h2\".\n\ts.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, \"h2-14\")\n\n\tif s.TLSNextProto == nil {\n\t\ts.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}\n\t}\n\tprotoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {\n\t\tif testHookOnConn != nil {\n\t\t\ttestHookOnConn()\n\t\t}\n\t\tconf.handleConn(hs, c, h)\n\t}\n\ts.TLSNextProto[NextProtoTLS] = protoHandler\n\ts.TLSNextProto[\"h2-14\"] = protoHandler // temporary; see above.\n}\n\nfunc (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) {\n\tsc := &serverConn{\n\t\tsrv:              srv,\n\t\ths:               hs,\n\t\tconn:             c,\n\t\tremoteAddrStr:    c.RemoteAddr().String(),\n\t\tbw:               newBufferedWriter(c),\n\t\thandler:          h,\n\t\tstreams:          make(map[uint32]*stream),\n\t\treadFrameCh:      make(chan frameAndGate),\n\t\treadFrameErrCh:   make(chan error, 1), // must be buffered for 1\n\t\twantWriteFrameCh: make(chan frameWriteMsg, 
8),\n\t\twroteFrameCh:     make(chan struct{}, 1), // buffered; one send in reading goroutine\n\t\tbodyReadCh:       make(chan bodyReadMsg), // buffering doesn't matter either way\n\t\tdoneServing:      make(chan struct{}),\n\t\tadvMaxStreams:    srv.maxConcurrentStreams(),\n\t\twriteSched: writeScheduler{\n\t\t\tmaxFrameSize: initialMaxFrameSize,\n\t\t},\n\t\tinitialWindowSize: initialWindowSize,\n\t\theaderTableSize:   initialHeaderTableSize,\n\t\tserveG:            newGoroutineLock(),\n\t\tpushEnabled:       true,\n\t}\n\tsc.flow.add(initialWindowSize)\n\tsc.inflow.add(initialWindowSize)\n\tsc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)\n\tsc.hpackDecoder = hpack.NewDecoder(initialHeaderTableSize, sc.onNewHeaderField)\n\n\tfr := NewFramer(sc.bw, c)\n\tfr.SetMaxReadFrameSize(srv.maxReadFrameSize())\n\tsc.framer = fr\n\n\tif tc, ok := c.(*tls.Conn); ok {\n\t\tsc.tlsState = new(tls.ConnectionState)\n\t\t*sc.tlsState = tc.ConnectionState()\n\t\t// 9.2 Use of TLS Features\n\t\t// An implementation of HTTP/2 over TLS MUST use TLS\n\t\t// 1.2 or higher with the restrictions on feature set\n\t\t// and cipher suite described in this section. Due to\n\t\t// implementation limitations, it might not be\n\t\t// possible to fail TLS negotiation. An endpoint MUST\n\t\t// immediately terminate an HTTP/2 connection that\n\t\t// does not meet the TLS requirements described in\n\t\t// this section with a connection error (Section\n\t\t// 5.4.1) of type INADEQUATE_SECURITY.\n\t\tif sc.tlsState.Version < tls.VersionTLS12 {\n\t\t\tsc.rejectConn(ErrCodeInadequateSecurity, \"TLS version too low\")\n\t\t\treturn\n\t\t}\n\n\t\tif sc.tlsState.ServerName == \"\" {\n\t\t\t// Client must use SNI, but we don't enforce that anymore,\n\t\t\t// since it was causing problems when connecting to bare IP\n\t\t\t// addresses during development.\n\t\t\t//\n\t\t\t// TODO: optionally enforce? 
Or enforce at the time we receive\n\t\t\t// a new request, and verify the the ServerName matches the :authority?\n\t\t\t// But that precludes proxy situations, perhaps.\n\t\t\t//\n\t\t\t// So for now, do nothing here again.\n\t\t}\n\n\t\tif !srv.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {\n\t\t\t// \"Endpoints MAY choose to generate a connection error\n\t\t\t// (Section 5.4.1) of type INADEQUATE_SECURITY if one of\n\t\t\t// the prohibited cipher suites are negotiated.\"\n\t\t\t//\n\t\t\t// We choose that. In my opinion, the spec is weak\n\t\t\t// here. It also says both parties must support at least\n\t\t\t// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no\n\t\t\t// excuses here. If we really must, we could allow an\n\t\t\t// \"AllowInsecureWeakCiphers\" option on the server later.\n\t\t\t// Let's see how it plays out first.\n\t\t\tsc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf(\"Prohibited TLS 1.2 Cipher Suite: %x\", sc.tlsState.CipherSuite))\n\t\t\treturn\n\t\t}\n\t}\n\n\tif hook := testHookGetServerConn; hook != nil {\n\t\thook(sc)\n\t}\n\tsc.serve()\n}\n\n// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.\nfunc isBadCipher(cipher uint16) bool {\n\tswitch cipher {\n\tcase tls.TLS_RSA_WITH_RC4_128_SHA,\n\t\ttls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\ttls.TLS_RSA_WITH_AES_128_CBC_SHA,\n\t\ttls.TLS_RSA_WITH_AES_256_CBC_SHA,\n\t\ttls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,\n\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,\n\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,\n\t\ttls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,\n\t\ttls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\ttls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,\n\t\ttls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:\n\t\t// Reject cipher suites from Appendix A.\n\t\t// \"This list includes those cipher suites that do not\n\t\t// offer an ephemeral key exchange and those that are\n\t\t// based on the TLS null, stream or block cipher type\"\n\t\treturn true\n\tdefault:\n\t\treturn 
false\n\t}\n}\n\nfunc (sc *serverConn) rejectConn(err ErrCode, debug string) {\n\tlog.Printf(\"REJECTING conn: %v, %s\", err, debug)\n\t// ignoring errors. hanging up anyway.\n\tsc.framer.WriteGoAway(0, err, []byte(debug))\n\tsc.bw.Flush()\n\tsc.conn.Close()\n}\n\n// frameAndGates coordinates the readFrames and serve\n// goroutines. Because the Framer interface only permits the most\n// recently-read Frame from being accessed, the readFrames goroutine\n// blocks until it has a frame, passes it to serve, and then waits for\n// serve to be done with it before reading the next one.\ntype frameAndGate struct {\n\tf Frame\n\tg gate\n}\n\ntype serverConn struct {\n\t// Immutable:\n\tsrv              *Server\n\ths               *http.Server\n\tconn             net.Conn\n\tbw               *bufferedWriter // writing to conn\n\thandler          http.Handler\n\tframer           *Framer\n\thpackDecoder     *hpack.Decoder\n\tdoneServing      chan struct{}     // closed when serverConn.serve ends\n\treadFrameCh      chan frameAndGate // written by serverConn.readFrames\n\treadFrameErrCh   chan error\n\twantWriteFrameCh chan frameWriteMsg   // from handlers -> serve\n\twroteFrameCh     chan struct{}        // from writeFrameAsync -> serve, tickles more frame writes\n\tbodyReadCh       chan bodyReadMsg     // from handlers -> serve\n\ttestHookCh       chan func()          // code to run on the serve loop\n\tflow             flow                 // conn-wide (not stream-specific) outbound flow control\n\tinflow           flow                 // conn-wide inbound flow control\n\ttlsState         *tls.ConnectionState // shared by all handlers, like net/http\n\tremoteAddrStr    string\n\n\t// Everything following is owned by the serve loop; use serveG.check():\n\tserveG                goroutineLock // used to verify funcs are on serve()\n\tpushEnabled           bool\n\tsawFirstSettings      bool // got the initial SETTINGS frame after the preface\n\tneedToSendSettingsAck 
bool\n\tunackedSettings       int    // how many SETTINGS have we sent without ACKs?\n\tclientMaxStreams      uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)\n\tadvMaxStreams         uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client\n\tcurOpenStreams        uint32 // client's number of open streams\n\tmaxStreamID           uint32 // max ever seen\n\tstreams               map[uint32]*stream\n\tinitialWindowSize     int32\n\theaderTableSize       uint32\n\tmaxHeaderListSize     uint32            // zero means unknown (default)\n\tcanonHeader           map[string]string // http2-lower-case -> Go-Canonical-Case\n\treq                   requestParam      // non-zero while reading request headers\n\twritingFrame          bool              // started write goroutine but haven't heard back on wroteFrameCh\n\tneedsFrameFlush       bool              // last frame write wasn't a flush\n\twriteSched            writeScheduler\n\tinGoAway              bool // we've started to or sent GOAWAY\n\tneedToSendGoAway      bool // we need to schedule a GOAWAY frame write\n\tgoAwayCode            ErrCode\n\tshutdownTimerCh       <-chan time.Time // nil until used\n\tshutdownTimer         *time.Timer      // nil until used\n\n\t// Owned by the writeFrameAsync goroutine:\n\theaderWriteBuf bytes.Buffer\n\thpackEncoder   *hpack.Encoder\n}\n\n// requestParam is the state of the next request, initialized over\n// potentially several frames HEADERS + zero or more CONTINUATION\n// frames.\ntype requestParam struct {\n\t// stream is non-nil if we're reading (HEADER or CONTINUATION)\n\t// frames for a request (but not DATA).\n\tstream            *stream\n\theader            http.Header\n\tmethod, path      string\n\tscheme, authority string\n\tsawRegularHeader  bool // saw a non-pseudo header already\n\tinvalidHeader     bool // an invalid header was seen\n}\n\n// stream represents a stream. 
This is the minimal metadata needed by\n// the serve goroutine. Most of the actual stream state is owned by\n// the http.Handler's goroutine in the responseWriter. Because the\n// responseWriter's responseWriterState is recycled at the end of a\n// handler, this struct intentionally has no pointer to the\n// *responseWriter{,State} itself, as the Handler ending nils out the\n// responseWriter's state field.\ntype stream struct {\n\t// immutable:\n\tid   uint32\n\tbody *pipe       // non-nil if expecting DATA frames\n\tcw   closeWaiter // closed wait stream transitions to closed state\n\n\t// owned by serverConn's serve loop:\n\tbodyBytes     int64   // body bytes seen so far\n\tdeclBodyBytes int64   // or -1 if undeclared\n\tflow          flow    // limits writing from Handler to client\n\tinflow        flow    // what the client is allowed to POST/etc to us\n\tparent        *stream // or nil\n\tweight        uint8\n\tstate         streamState\n\tsentReset     bool // only true once detached from streams map\n\tgotReset      bool // only true once detacted from streams map\n}\n\nfunc (sc *serverConn) Framer() *Framer  { return sc.framer }\nfunc (sc *serverConn) CloseConn() error { return sc.conn.Close() }\nfunc (sc *serverConn) Flush() error     { return sc.bw.Flush() }\nfunc (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {\n\treturn sc.hpackEncoder, &sc.headerWriteBuf\n}\n\nfunc (sc *serverConn) state(streamID uint32) (streamState, *stream) {\n\tsc.serveG.check()\n\t// http://http2.github.io/http2-spec/#rfc.section.5.1\n\tif st, ok := sc.streams[streamID]; ok {\n\t\treturn st.state, st\n\t}\n\t// \"The first use of a new stream identifier implicitly closes all\n\t// streams in the \"idle\" state that might have been initiated by\n\t// that peer with a lower-valued stream identifier. 
For example, if\n\t// a client sends a HEADERS frame on stream 7 without ever sending a\n\t// frame on stream 5, then stream 5 transitions to the \"closed\"\n\t// state when the first frame for stream 7 is sent or received.\"\n\tif streamID <= sc.maxStreamID {\n\t\treturn stateClosed, nil\n\t}\n\treturn stateIdle, nil\n}\n\nfunc (sc *serverConn) vlogf(format string, args ...interface{}) {\n\tif VerboseLogs {\n\t\tsc.logf(format, args...)\n\t}\n}\n\nfunc (sc *serverConn) logf(format string, args ...interface{}) {\n\tif lg := sc.hs.ErrorLog; lg != nil {\n\t\tlg.Printf(format, args...)\n\t} else {\n\t\tlog.Printf(format, args...)\n\t}\n}\n\nfunc (sc *serverConn) condlogf(err error, format string, args ...interface{}) {\n\tif err == nil {\n\t\treturn\n\t}\n\tstr := err.Error()\n\tif err == io.EOF || strings.Contains(str, \"use of closed network connection\") {\n\t\t// Boring, expected errors.\n\t\tsc.vlogf(format, args...)\n\t} else {\n\t\tsc.logf(format, args...)\n\t}\n}\n\nfunc (sc *serverConn) onNewHeaderField(f hpack.HeaderField) {\n\tsc.serveG.check()\n\tsc.vlogf(\"got header field %+v\", f)\n\tswitch {\n\tcase !validHeader(f.Name):\n\t\tsc.req.invalidHeader = true\n\tcase strings.HasPrefix(f.Name, \":\"):\n\t\tif sc.req.sawRegularHeader {\n\t\t\tsc.logf(\"pseudo-header after regular header\")\n\t\t\tsc.req.invalidHeader = true\n\t\t\treturn\n\t\t}\n\t\tvar dst *string\n\t\tswitch f.Name {\n\t\tcase \":method\":\n\t\t\tdst = &sc.req.method\n\t\tcase \":path\":\n\t\t\tdst = &sc.req.path\n\t\tcase \":scheme\":\n\t\t\tdst = &sc.req.scheme\n\t\tcase \":authority\":\n\t\t\tdst = &sc.req.authority\n\t\tdefault:\n\t\t\t// 8.1.2.1 Pseudo-Header Fields\n\t\t\t// \"Endpoints MUST treat a request or response\n\t\t\t// that contains undefined or invalid\n\t\t\t// pseudo-header fields as malformed (Section\n\t\t\t// 8.1.2.6).\"\n\t\t\tsc.logf(\"invalid pseudo-header %q\", f.Name)\n\t\t\tsc.req.invalidHeader = true\n\t\t\treturn\n\t\t}\n\t\tif *dst != \"\" 
{\n\t\t\tsc.logf(\"duplicate pseudo-header %q sent\", f.Name)\n\t\t\tsc.req.invalidHeader = true\n\t\t\treturn\n\t\t}\n\t\t*dst = f.Value\n\tcase f.Name == \"cookie\":\n\t\tsc.req.sawRegularHeader = true\n\t\tif s, ok := sc.req.header[\"Cookie\"]; ok && len(s) == 1 {\n\t\t\ts[0] = s[0] + \"; \" + f.Value\n\t\t} else {\n\t\t\tsc.req.header.Add(\"Cookie\", f.Value)\n\t\t}\n\tdefault:\n\t\tsc.req.sawRegularHeader = true\n\t\tsc.req.header.Add(sc.canonicalHeader(f.Name), f.Value)\n\t}\n}\n\nfunc (sc *serverConn) canonicalHeader(v string) string {\n\tsc.serveG.check()\n\tcv, ok := commonCanonHeader[v]\n\tif ok {\n\t\treturn cv\n\t}\n\tcv, ok = sc.canonHeader[v]\n\tif ok {\n\t\treturn cv\n\t}\n\tif sc.canonHeader == nil {\n\t\tsc.canonHeader = make(map[string]string)\n\t}\n\tcv = http.CanonicalHeaderKey(v)\n\tsc.canonHeader[v] = cv\n\treturn cv\n}\n\n// readFrames is the loop that reads incoming frames.\n// It's run on its own goroutine.\nfunc (sc *serverConn) readFrames() {\n\tg := make(gate, 1)\n\tfor {\n\t\tf, err := sc.framer.ReadFrame()\n\t\tif err != nil {\n\t\t\tsc.readFrameErrCh <- err\n\t\t\tclose(sc.readFrameCh)\n\t\t\treturn\n\t\t}\n\t\tsc.readFrameCh <- frameAndGate{f, g}\n\t\t// We can't read another frame until this one is\n\t\t// processed, as the ReadFrame interface doesn't copy\n\t\t// memory.  The Frame accessor methods access the last\n\t\t// frame's (shared) buffer. 
So we wait for the\n\t\t// serve goroutine to tell us it's done:\n\t\tg.Wait()\n\t}\n}\n\n// writeFrameAsync runs in its own goroutine and writes a single frame\n// and then reports when it's done.\n// At most one goroutine can be running writeFrameAsync at a time per\n// serverConn.\nfunc (sc *serverConn) writeFrameAsync(wm frameWriteMsg) {\n\terr := wm.write.writeFrame(sc)\n\tif ch := wm.done; ch != nil {\n\t\tselect {\n\t\tcase ch <- err:\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unbuffered done channel passed in for type %T\", wm.write))\n\t\t}\n\t}\n\tsc.wroteFrameCh <- struct{}{} // tickle frame selection scheduler\n}\n\nfunc (sc *serverConn) closeAllStreamsOnConnClose() {\n\tsc.serveG.check()\n\tfor _, st := range sc.streams {\n\t\tsc.closeStream(st, errClientDisconnected)\n\t}\n}\n\nfunc (sc *serverConn) stopShutdownTimer() {\n\tsc.serveG.check()\n\tif t := sc.shutdownTimer; t != nil {\n\t\tt.Stop()\n\t}\n}\n\nfunc (sc *serverConn) notePanic() {\n\tif testHookOnPanicMu != nil {\n\t\ttestHookOnPanicMu.Lock()\n\t\tdefer testHookOnPanicMu.Unlock()\n\t}\n\tif testHookOnPanic != nil {\n\t\tif e := recover(); e != nil {\n\t\t\tif testHookOnPanic(sc, e) {\n\t\t\t\tpanic(e)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (sc *serverConn) serve() {\n\tsc.serveG.check()\n\tdefer sc.notePanic()\n\tdefer sc.conn.Close()\n\tdefer sc.closeAllStreamsOnConnClose()\n\tdefer sc.stopShutdownTimer()\n\tdefer close(sc.doneServing) // unblocks handlers trying to send\n\n\tsc.vlogf(\"HTTP/2 connection from %v on %p\", sc.conn.RemoteAddr(), sc.hs)\n\n\tsc.writeFrame(frameWriteMsg{\n\t\twrite: writeSettings{\n\t\t\t{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},\n\t\t\t{SettingMaxConcurrentStreams, sc.advMaxStreams},\n\n\t\t\t// TODO: more actual settings, notably\n\t\t\t// SettingInitialWindowSize, but then we also\n\t\t\t// want to bump up the conn window size the\n\t\t\t// same amount here right after the settings\n\t\t},\n\t})\n\tsc.unackedSettings++\n\n\tif err := sc.readPreface(); err != 
nil {\n\t\tsc.condlogf(err, \"error reading preface from client %v: %v\", sc.conn.RemoteAddr(), err)\n\t\treturn\n\t}\n\n\tgo sc.readFrames() // closed by defer sc.conn.Close above\n\n\tsettingsTimer := time.NewTimer(firstSettingsTimeout)\n\tfor {\n\t\tselect {\n\t\tcase wm := <-sc.wantWriteFrameCh:\n\t\t\tsc.writeFrame(wm)\n\t\tcase <-sc.wroteFrameCh:\n\t\t\tif sc.writingFrame != true {\n\t\t\t\tpanic(\"internal error: expected to be already writing a frame\")\n\t\t\t}\n\t\t\tsc.writingFrame = false\n\t\t\tsc.scheduleFrameWrite()\n\t\tcase fg, ok := <-sc.readFrameCh:\n\t\t\tif !ok {\n\t\t\t\tsc.readFrameCh = nil\n\t\t\t}\n\t\t\tif !sc.processFrameFromReader(fg, ok) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif settingsTimer.C != nil {\n\t\t\t\tsettingsTimer.Stop()\n\t\t\t\tsettingsTimer.C = nil\n\t\t\t}\n\t\tcase m := <-sc.bodyReadCh:\n\t\t\tsc.noteBodyRead(m.st, m.n)\n\t\tcase <-settingsTimer.C:\n\t\t\tsc.logf(\"timeout waiting for SETTINGS frames from %v\", sc.conn.RemoteAddr())\n\t\t\treturn\n\t\tcase <-sc.shutdownTimerCh:\n\t\t\tsc.vlogf(\"GOAWAY close timer fired; closing conn from %v\", sc.conn.RemoteAddr())\n\t\t\treturn\n\t\tcase fn := <-sc.testHookCh:\n\t\t\tfn()\n\t\t}\n\t}\n}\n\n// readPreface reads the ClientPreface greeting from the peer\n// or returns an error on timeout or an invalid greeting.\nfunc (sc *serverConn) readPreface() error {\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\t// Read the client preface\n\t\tbuf := make([]byte, len(ClientPreface))\n\t\tif _, err := io.ReadFull(sc.conn, buf); err != nil {\n\t\t\terrc <- err\n\t\t} else if !bytes.Equal(buf, clientPreface) {\n\t\t\terrc <- fmt.Errorf(\"bogus greeting %q\", buf)\n\t\t} else {\n\t\t\terrc <- nil\n\t\t}\n\t}()\n\ttimer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?\n\tdefer timer.Stop()\n\tselect {\n\tcase <-timer.C:\n\t\treturn errors.New(\"timeout waiting for client preface\")\n\tcase err := <-errc:\n\t\tif err == nil {\n\t\t\tsc.vlogf(\"client %v said hello\", 
sc.conn.RemoteAddr())\n\t\t}\n\t\treturn err\n\t}\n}\n\n// writeDataFromHandler writes the data described in req to stream.id.\n//\n// The provided ch is used to avoid allocating new channels for each\n// write operation. It's expected that the caller reuses writeData and ch\n// over time.\n//\n// The flow control currently happens in the Handler where it waits\n// for 1 or more bytes to be available to then write here.  So at this\n// point we know that we have flow control. But this might have to\n// change when priority is implemented, so the serve goroutine knows\n// the total amount of bytes waiting to be sent and can can have more\n// scheduling decisions available.\nfunc (sc *serverConn) writeDataFromHandler(stream *stream, writeData *writeData, ch chan error) error {\n\tsc.writeFrameFromHandler(frameWriteMsg{\n\t\twrite:  writeData,\n\t\tstream: stream,\n\t\tdone:   ch,\n\t})\n\tselect {\n\tcase err := <-ch:\n\t\treturn err\n\tcase <-sc.doneServing:\n\t\treturn errClientDisconnected\n\tcase <-stream.cw:\n\t\treturn errStreamBroken\n\t}\n}\n\n// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts\n// if the connection has gone away.\n//\n// This must not be run from the serve goroutine itself, else it might\n// deadlock writing to sc.wantWriteFrameCh (which is only mildly\n// buffered and is read by serve itself). If you're on the serve\n// goroutine, call writeFrame instead.\nfunc (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) {\n\tsc.serveG.checkNotOn() // NOT\n\tselect {\n\tcase sc.wantWriteFrameCh <- wm:\n\tcase <-sc.doneServing:\n\t\t// Client has closed their connection to the server.\n\t}\n}\n\n// writeFrame schedules a frame to write and sends it if there's nothing\n// already being written.\n//\n// There is no pushback here (the serve goroutine never blocks). 
It's\n// the http.Handlers that block, waiting for their previous frames to\n// make it onto the wire\n//\n// If you're not on the serve goroutine, use writeFrameFromHandler instead.\nfunc (sc *serverConn) writeFrame(wm frameWriteMsg) {\n\tsc.serveG.check()\n\tsc.writeSched.add(wm)\n\tsc.scheduleFrameWrite()\n}\n\n// startFrameWrite starts a goroutine to write wm (in a separate\n// goroutine since that might block on the network), and updates the\n// serve goroutine's state about the world, updated from info in wm.\nfunc (sc *serverConn) startFrameWrite(wm frameWriteMsg) {\n\tsc.serveG.check()\n\tif sc.writingFrame {\n\t\tpanic(\"internal error: can only be writing one frame at a time\")\n\t}\n\tsc.writingFrame = true\n\n\tst := wm.stream\n\tif st != nil {\n\t\tswitch st.state {\n\t\tcase stateHalfClosedLocal:\n\t\t\tpanic(\"internal error: attempt to send frame on half-closed-local stream\")\n\t\tcase stateClosed:\n\t\t\tif st.sentReset || st.gotReset {\n\t\t\t\t// Skip this frame. But fake the frame write to reschedule:\n\t\t\t\tsc.wroteFrameCh <- struct{}{}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpanic(fmt.Sprintf(\"internal error: attempt to send a write %v on a closed stream\", wm))\n\t\t}\n\t}\n\n\tsc.needsFrameFlush = true\n\tif endsStream(wm.write) {\n\t\tif st == nil {\n\t\t\tpanic(\"internal error: expecting non-nil stream\")\n\t\t}\n\t\tswitch st.state {\n\t\tcase stateOpen:\n\t\t\t// Here we would go to stateHalfClosedLocal in\n\t\t\t// theory, but since our handler is done and\n\t\t\t// the net/http package provides no mechanism\n\t\t\t// for finishing writing to a ResponseWriter\n\t\t\t// while still reading data (see possible TODO\n\t\t\t// at top of this file), we go into closed\n\t\t\t// state here anyway, after telling the peer\n\t\t\t// we're hanging up on them.\n\t\t\tst.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream\n\t\t\terrCancel := StreamError{st.id, 
ErrCodeCancel}\n\t\t\tsc.resetStream(errCancel)\n\t\tcase stateHalfClosedRemote:\n\t\t\tsc.closeStream(st, nil)\n\t\t}\n\t}\n\tgo sc.writeFrameAsync(wm)\n}\n\n// scheduleFrameWrite tickles the frame writing scheduler.\n//\n// If a frame is already being written, nothing happens. This will be called again\n// when the frame is done being written.\n//\n// If a frame isn't being written we need to send one, the best frame\n// to send is selected, preferring first things that aren't\n// stream-specific (e.g. ACKing settings), and then finding the\n// highest priority stream.\n//\n// If a frame isn't being written and there's nothing else to send, we\n// flush the write buffer.\nfunc (sc *serverConn) scheduleFrameWrite() {\n\tsc.serveG.check()\n\tif sc.writingFrame {\n\t\treturn\n\t}\n\tif sc.needToSendGoAway {\n\t\tsc.needToSendGoAway = false\n\t\tsc.startFrameWrite(frameWriteMsg{\n\t\t\twrite: &writeGoAway{\n\t\t\t\tmaxStreamID: sc.maxStreamID,\n\t\t\t\tcode:        sc.goAwayCode,\n\t\t\t},\n\t\t})\n\t\treturn\n\t}\n\tif sc.needToSendSettingsAck {\n\t\tsc.needToSendSettingsAck = false\n\t\tsc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}})\n\t\treturn\n\t}\n\tif !sc.inGoAway {\n\t\tif wm, ok := sc.writeSched.take(); ok {\n\t\t\tsc.startFrameWrite(wm)\n\t\t\treturn\n\t\t}\n\t}\n\tif sc.needsFrameFlush {\n\t\tsc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}})\n\t\tsc.needsFrameFlush = false // after startFrameWrite, since it sets this true\n\t\treturn\n\t}\n}\n\nfunc (sc *serverConn) goAway(code ErrCode) {\n\tsc.serveG.check()\n\tif sc.inGoAway {\n\t\treturn\n\t}\n\tif code != ErrCodeNo {\n\t\tsc.shutDownIn(250 * time.Millisecond)\n\t} else {\n\t\t// TODO: configurable\n\t\tsc.shutDownIn(1 * time.Second)\n\t}\n\tsc.inGoAway = true\n\tsc.needToSendGoAway = true\n\tsc.goAwayCode = code\n\tsc.scheduleFrameWrite()\n}\n\nfunc (sc *serverConn) shutDownIn(d time.Duration) {\n\tsc.serveG.check()\n\tsc.shutdownTimer = time.NewTimer(d)\n\tsc.shutdownTimerCh = 
sc.shutdownTimer.C\n}\n\nfunc (sc *serverConn) resetStream(se StreamError) {\n\tsc.serveG.check()\n\tsc.writeFrame(frameWriteMsg{write: se})\n\tif st, ok := sc.streams[se.StreamID]; ok {\n\t\tst.sentReset = true\n\t\tsc.closeStream(st, se)\n\t}\n}\n\n// curHeaderStreamID returns the stream ID of the header block we're\n// currently in the middle of reading. If this returns non-zero, the\n// next frame must be a CONTINUATION with this stream id.\nfunc (sc *serverConn) curHeaderStreamID() uint32 {\n\tsc.serveG.check()\n\tst := sc.req.stream\n\tif st == nil {\n\t\treturn 0\n\t}\n\treturn st.id\n}\n\n// processFrameFromReader processes the serve loop's read from readFrameCh from the\n// frame-reading goroutine.\n// processFrameFromReader returns whether the connection should be kept open.\nfunc (sc *serverConn) processFrameFromReader(fg frameAndGate, fgValid bool) bool {\n\tsc.serveG.check()\n\tvar clientGone bool\n\tvar err error\n\tif !fgValid {\n\t\terr = <-sc.readFrameErrCh\n\t\tif err == ErrFrameTooLarge {\n\t\t\tsc.goAway(ErrCodeFrameSize)\n\t\t\treturn true // goAway will close the loop\n\t\t}\n\t\tclientGone = err == io.EOF || strings.Contains(err.Error(), \"use of closed network connection\")\n\t\tif clientGone {\n\t\t\t// TODO: could we also get into this state if\n\t\t\t// the peer does a half close\n\t\t\t// (e.g. CloseWrite) because they're done\n\t\t\t// sending frames but they're still wanting\n\t\t\t// our open replies?  Investigate.\n\t\t\t// TODO: add CloseWrite to crypto/tls.Conn first\n\t\t\t// so we have a way to test this? 
I suppose\n\t\t\t// just for testing we could have a non-TLS mode.\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif fgValid {\n\t\tf := fg.f\n\t\tsc.vlogf(\"got %v: %#v\", f.Header(), f)\n\t\terr = sc.processFrame(f)\n\t\tfg.g.Done() // unblock the readFrames goroutine\n\t\tif err == nil {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tswitch ev := err.(type) {\n\tcase StreamError:\n\t\tsc.resetStream(ev)\n\t\treturn true\n\tcase goAwayFlowError:\n\t\tsc.goAway(ErrCodeFlowControl)\n\t\treturn true\n\tcase ConnectionError:\n\t\tsc.logf(\"%v: %v\", sc.conn.RemoteAddr(), ev)\n\t\tsc.goAway(ErrCode(ev))\n\t\treturn true // goAway will handle shutdown\n\tdefault:\n\t\tif !fgValid {\n\t\t\tsc.logf(\"disconnecting; error reading frame from client %s: %v\", sc.conn.RemoteAddr(), err)\n\t\t} else {\n\t\t\tsc.logf(\"disconnection due to other error: %v\", err)\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (sc *serverConn) processFrame(f Frame) error {\n\tsc.serveG.check()\n\n\t// First frame received must be SETTINGS.\n\tif !sc.sawFirstSettings {\n\t\tif _, ok := f.(*SettingsFrame); !ok {\n\t\t\treturn ConnectionError(ErrCodeProtocol)\n\t\t}\n\t\tsc.sawFirstSettings = true\n\t}\n\n\tif s := sc.curHeaderStreamID(); s != 0 {\n\t\tif cf, ok := f.(*ContinuationFrame); !ok {\n\t\t\treturn ConnectionError(ErrCodeProtocol)\n\t\t} else if cf.Header().StreamID != s {\n\t\t\treturn ConnectionError(ErrCodeProtocol)\n\t\t}\n\t}\n\n\tswitch f := f.(type) {\n\tcase *SettingsFrame:\n\t\treturn sc.processSettings(f)\n\tcase *HeadersFrame:\n\t\treturn sc.processHeaders(f)\n\tcase *ContinuationFrame:\n\t\treturn sc.processContinuation(f)\n\tcase *WindowUpdateFrame:\n\t\treturn sc.processWindowUpdate(f)\n\tcase *PingFrame:\n\t\treturn sc.processPing(f)\n\tcase *DataFrame:\n\t\treturn sc.processData(f)\n\tcase *RSTStreamFrame:\n\t\treturn sc.processResetStream(f)\n\tcase *PriorityFrame:\n\t\treturn sc.processPriority(f)\n\tcase *PushPromiseFrame:\n\t\t// A client cannot push. 
Thus, servers MUST treat the receipt of a PUSH_PROMISE\n\t\t// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.\n\t\treturn ConnectionError(ErrCodeProtocol)\n\tdefault:\n\t\tlog.Printf(\"Ignoring frame: %v\", f.Header())\n\t\treturn nil\n\t}\n}\n\nfunc (sc *serverConn) processPing(f *PingFrame) error {\n\tsc.serveG.check()\n\tif f.Flags.Has(FlagSettingsAck) {\n\t\t// 6.7 PING: \" An endpoint MUST NOT respond to PING frames\n\t\t// containing this flag.\"\n\t\treturn nil\n\t}\n\tif f.StreamID != 0 {\n\t\t// \"PING frames are not associated with any individual\n\t\t// stream. If a PING frame is received with a stream\n\t\t// identifier field value other than 0x0, the recipient MUST\n\t\t// respond with a connection error (Section 5.4.1) of type\n\t\t// PROTOCOL_ERROR.\"\n\t\treturn ConnectionError(ErrCodeProtocol)\n\t}\n\tsc.writeFrame(frameWriteMsg{write: writePingAck{f}})\n\treturn nil\n}\n\nfunc (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {\n\tsc.serveG.check()\n\tswitch {\n\tcase f.StreamID != 0: // stream-level flow control\n\t\tst := sc.streams[f.StreamID]\n\t\tif st == nil {\n\t\t\t// \"WINDOW_UPDATE can be sent by a peer that has sent a\n\t\t\t// frame bearing the END_STREAM flag. This means that a\n\t\t\t// receiver could receive a WINDOW_UPDATE frame on a \"half\n\t\t\t// closed (remote)\" or \"closed\" stream. 
A receiver MUST\n\t\t\t// NOT treat this as an error, see Section 5.1.\"\n\t\t\treturn nil\n\t\t}\n\t\tif !st.flow.add(int32(f.Increment)) {\n\t\t\treturn StreamError{f.StreamID, ErrCodeFlowControl}\n\t\t}\n\tdefault: // connection-level flow control\n\t\tif !sc.flow.add(int32(f.Increment)) {\n\t\t\treturn goAwayFlowError{}\n\t\t}\n\t}\n\tsc.scheduleFrameWrite()\n\treturn nil\n}\n\nfunc (sc *serverConn) processResetStream(f *RSTStreamFrame) error {\n\tsc.serveG.check()\n\n\tstate, st := sc.state(f.StreamID)\n\tif state == stateIdle {\n\t\t// 6.4 \"RST_STREAM frames MUST NOT be sent for a\n\t\t// stream in the \"idle\" state. If a RST_STREAM frame\n\t\t// identifying an idle stream is received, the\n\t\t// recipient MUST treat this as a connection error\n\t\t// (Section 5.4.1) of type PROTOCOL_ERROR.\n\t\treturn ConnectionError(ErrCodeProtocol)\n\t}\n\tif st != nil {\n\t\tst.gotReset = true\n\t\tsc.closeStream(st, StreamError{f.StreamID, f.ErrCode})\n\t}\n\treturn nil\n}\n\nfunc (sc *serverConn) closeStream(st *stream, err error) {\n\tsc.serveG.check()\n\tif st.state == stateIdle || st.state == stateClosed {\n\t\tpanic(fmt.Sprintf(\"invariant; can't close stream in state %v\", st.state))\n\t}\n\tst.state = stateClosed\n\tsc.curOpenStreams--\n\tdelete(sc.streams, st.id)\n\tif p := st.body; p != nil {\n\t\tp.Close(err)\n\t}\n\tst.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc\n\tsc.writeSched.forgetStream(st.id)\n}\n\nfunc (sc *serverConn) processSettings(f *SettingsFrame) error {\n\tsc.serveG.check()\n\tif f.IsAck() {\n\t\tsc.unackedSettings--\n\t\tif sc.unackedSettings < 0 {\n\t\t\t// Why is the peer ACKing settings we never sent?\n\t\t\t// The spec doesn't mention this case, but\n\t\t\t// hang up on them anyway.\n\t\t\treturn ConnectionError(ErrCodeProtocol)\n\t\t}\n\t\treturn nil\n\t}\n\tif err := f.ForeachSetting(sc.processSetting); err != nil {\n\t\treturn err\n\t}\n\tsc.needToSendSettingsAck = true\n\tsc.scheduleFrameWrite()\n\treturn 
nil\n}\n\nfunc (sc *serverConn) processSetting(s Setting) error {\n\tsc.serveG.check()\n\tif err := s.Valid(); err != nil {\n\t\treturn err\n\t}\n\tsc.vlogf(\"processing setting %v\", s)\n\tswitch s.ID {\n\tcase SettingHeaderTableSize:\n\t\tsc.headerTableSize = s.Val\n\t\tsc.hpackEncoder.SetMaxDynamicTableSize(s.Val)\n\tcase SettingEnablePush:\n\t\tsc.pushEnabled = s.Val != 0\n\tcase SettingMaxConcurrentStreams:\n\t\tsc.clientMaxStreams = s.Val\n\tcase SettingInitialWindowSize:\n\t\treturn sc.processSettingInitialWindowSize(s.Val)\n\tcase SettingMaxFrameSize:\n\t\tsc.writeSched.maxFrameSize = s.Val\n\tcase SettingMaxHeaderListSize:\n\t\tsc.maxHeaderListSize = s.Val\n\tdefault:\n\t\t// Unknown setting: \"An endpoint that receives a SETTINGS\n\t\t// frame with any unknown or unsupported identifier MUST\n\t\t// ignore that setting.\"\n\t}\n\treturn nil\n}\n\nfunc (sc *serverConn) processSettingInitialWindowSize(val uint32) error {\n\tsc.serveG.check()\n\t// Note: val already validated to be within range by\n\t// processSetting's Valid call.\n\n\t// \"A SETTINGS frame can alter the initial flow control window\n\t// size for all current streams. 
When the value of\n\t// SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST\n\t// adjust the size of all stream flow control windows that it\n\t// maintains by the difference between the new value and the\n\t// old value.\"\n\told := sc.initialWindowSize\n\tsc.initialWindowSize = int32(val)\n\tgrowth := sc.initialWindowSize - old // may be negative\n\tfor _, st := range sc.streams {\n\t\tif !st.flow.add(growth) {\n\t\t\t// 6.9.2 Initial Flow Control Window Size\n\t\t\t// \"An endpoint MUST treat a change to\n\t\t\t// SETTINGS_INITIAL_WINDOW_SIZE that causes any flow\n\t\t\t// control window to exceed the maximum size as a\n\t\t\t// connection error (Section 5.4.1) of type\n\t\t\t// FLOW_CONTROL_ERROR.\"\n\t\t\treturn ConnectionError(ErrCodeFlowControl)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (sc *serverConn) processData(f *DataFrame) error {\n\tsc.serveG.check()\n\t// \"If a DATA frame is received whose stream is not in \"open\"\n\t// or \"half closed (local)\" state, the recipient MUST respond\n\t// with a stream error (Section 5.4.2) of type STREAM_CLOSED.\"\n\tid := f.Header().StreamID\n\tst, ok := sc.streams[id]\n\tif !ok || st.state != stateOpen {\n\t\t// This includes sending a RST_STREAM if the stream is\n\t\t// in stateHalfClosedLocal (which currently means that\n\t\t// the http.Handler returned, so it's done reading &\n\t\t// done writing). 
Try to stop the client from sending\n\t\t// more DATA.\n\t\treturn StreamError{id, ErrCodeStreamClosed}\n\t}\n\tif st.body == nil {\n\t\tpanic(\"internal error: should have a body in this state\")\n\t}\n\tdata := f.Data()\n\n\t// Sender sending more than they'd declared?\n\tif st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {\n\t\tst.body.Close(fmt.Errorf(\"sender tried to send more than declared Content-Length of %d bytes\", st.declBodyBytes))\n\t\treturn StreamError{id, ErrCodeStreamClosed}\n\t}\n\tif len(data) > 0 {\n\t\t// Check whether the client has flow control quota.\n\t\tif int(st.inflow.available()) < len(data) {\n\t\t\treturn StreamError{id, ErrCodeFlowControl}\n\t\t}\n\t\tst.inflow.take(int32(len(data)))\n\t\twrote, err := st.body.Write(data)\n\t\tif err != nil {\n\t\t\treturn StreamError{id, ErrCodeStreamClosed}\n\t\t}\n\t\tif wrote != len(data) {\n\t\t\tpanic(\"internal error: bad Writer\")\n\t\t}\n\t\tst.bodyBytes += int64(len(data))\n\t}\n\tif f.StreamEnded() {\n\t\tif st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {\n\t\t\tst.body.Close(fmt.Errorf(\"request declared a Content-Length of %d but only wrote %d bytes\",\n\t\t\t\tst.declBodyBytes, st.bodyBytes))\n\t\t} else {\n\t\t\tst.body.Close(io.EOF)\n\t\t}\n\t\tst.state = stateHalfClosedRemote\n\t}\n\treturn nil\n}\n\nfunc (sc *serverConn) processHeaders(f *HeadersFrame) error {\n\tsc.serveG.check()\n\tid := f.Header().StreamID\n\tif sc.inGoAway {\n\t\t// Ignore.\n\t\treturn nil\n\t}\n\t// http://http2.github.io/http2-spec/#rfc.section.5.1.1\n\tif id%2 != 1 || id <= sc.maxStreamID || sc.req.stream != nil {\n\t\t// Streams initiated by a client MUST use odd-numbered\n\t\t// stream identifiers. [...] The identifier of a newly\n\t\t// established stream MUST be numerically greater than all\n\t\t// streams that the initiating endpoint has opened or\n\t\t// reserved. [...]  
An endpoint that receives an unexpected\n\t\t// stream identifier MUST respond with a connection error\n\t\t// (Section 5.4.1) of type PROTOCOL_ERROR.\n\t\treturn ConnectionError(ErrCodeProtocol)\n\t}\n\tif id > sc.maxStreamID {\n\t\tsc.maxStreamID = id\n\t}\n\tst := &stream{\n\t\tid:    id,\n\t\tstate: stateOpen,\n\t}\n\tif f.StreamEnded() {\n\t\tst.state = stateHalfClosedRemote\n\t}\n\tst.cw.Init()\n\n\tst.flow.conn = &sc.flow // link to conn-level counter\n\tst.flow.add(sc.initialWindowSize)\n\tst.inflow.conn = &sc.inflow      // link to conn-level counter\n\tst.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings\n\n\tsc.streams[id] = st\n\tif f.HasPriority() {\n\t\tadjustStreamPriority(sc.streams, st.id, f.Priority)\n\t}\n\tsc.curOpenStreams++\n\tsc.req = requestParam{\n\t\tstream: st,\n\t\theader: make(http.Header),\n\t}\n\treturn sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded())\n}\n\nfunc (sc *serverConn) processContinuation(f *ContinuationFrame) error {\n\tsc.serveG.check()\n\tst := sc.streams[f.Header().StreamID]\n\tif st == nil || sc.curHeaderStreamID() != st.id {\n\t\treturn ConnectionError(ErrCodeProtocol)\n\t}\n\treturn sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded())\n}\n\nfunc (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bool) error {\n\tsc.serveG.check()\n\tif _, err := sc.hpackDecoder.Write(frag); err != nil {\n\t\t// TODO: convert to stream error I assume?\n\t\treturn err\n\t}\n\tif !end {\n\t\treturn nil\n\t}\n\tif err := sc.hpackDecoder.Close(); err != nil {\n\t\t// TODO: convert to stream error I assume?\n\t\treturn err\n\t}\n\tdefer sc.resetPendingRequest()\n\tif sc.curOpenStreams > sc.advMaxStreams {\n\t\t// \"Endpoints MUST NOT exceed the limit set by their\n\t\t// peer. 
An endpoint that receives a HEADERS frame\n\t\t// that causes their advertised concurrent stream\n\t\t// limit to be exceeded MUST treat this as a stream\n\t\t// error (Section 5.4.2) of type PROTOCOL_ERROR or\n\t\t// REFUSED_STREAM.\"\n\t\tif sc.unackedSettings == 0 {\n\t\t\t// They should know better.\n\t\t\treturn StreamError{st.id, ErrCodeProtocol}\n\t\t}\n\t\t// Assume it's a network race, where they just haven't\n\t\t// received our last SETTINGS update. But actually\n\t\t// this can't happen yet, because we don't yet provide\n\t\t// a way for users to adjust server parameters at\n\t\t// runtime.\n\t\treturn StreamError{st.id, ErrCodeRefusedStream}\n\t}\n\n\trw, req, err := sc.newWriterAndRequest()\n\tif err != nil {\n\t\treturn err\n\t}\n\tst.body = req.Body.(*requestBody).pipe // may be nil\n\tst.declBodyBytes = req.ContentLength\n\tgo sc.runHandler(rw, req)\n\treturn nil\n}\n\nfunc (sc *serverConn) processPriority(f *PriorityFrame) error {\n\tadjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam)\n\treturn nil\n}\n\nfunc adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) {\n\tst, ok := streams[streamID]\n\tif !ok {\n\t\t// TODO: not quite correct (this streamID might\n\t\t// already exist in the dep tree, but be closed), but\n\t\t// close enough for now.\n\t\treturn\n\t}\n\tst.weight = priority.Weight\n\tparent := streams[priority.StreamDep] // might be nil\n\tif parent == st {\n\t\t// if client tries to set this stream to be the parent of itself\n\t\t// ignore and keep going\n\t\treturn\n\t}\n\n\t// section 5.3.3: If a stream is made dependent on one of its\n\t// own dependencies, the formerly dependent stream is first\n\t// moved to be dependent on the reprioritized stream's previous\n\t// parent. 
The moved dependency retains its weight.\n\tfor piter := parent; piter != nil; piter = piter.parent {\n\t\tif piter == st {\n\t\t\tparent.parent = st.parent\n\t\t\tbreak\n\t\t}\n\t}\n\tst.parent = parent\n\tif priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) {\n\t\tfor _, openStream := range streams {\n\t\t\tif openStream != st && openStream.parent == st.parent {\n\t\t\t\topenStream.parent = st\n\t\t\t}\n\t\t}\n\t}\n}\n\n// resetPendingRequest zeros out all state related to a HEADERS frame\n// and its zero or more CONTINUATION frames sent to start a new\n// request.\nfunc (sc *serverConn) resetPendingRequest() {\n\tsc.serveG.check()\n\tsc.req = requestParam{}\n}\n\nfunc (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, error) {\n\tsc.serveG.check()\n\trp := &sc.req\n\tif rp.invalidHeader || rp.method == \"\" || rp.path == \"\" ||\n\t\t(rp.scheme != \"https\" && rp.scheme != \"http\") {\n\t\t// See 8.1.2.6 Malformed Requests and Responses:\n\t\t//\n\t\t// Malformed requests or responses that are detected\n\t\t// MUST be treated as a stream error (Section 5.4.2)\n\t\t// of type PROTOCOL_ERROR.\"\n\t\t//\n\t\t// 8.1.2.3 Request Pseudo-Header Fields\n\t\t// \"All HTTP/2 requests MUST include exactly one valid\n\t\t// value for the :method, :scheme, and :path\n\t\t// pseudo-header fields\"\n\t\treturn nil, nil, StreamError{rp.stream.id, ErrCodeProtocol}\n\t}\n\tvar tlsState *tls.ConnectionState // nil if not scheme https\n\tif rp.scheme == \"https\" {\n\t\ttlsState = sc.tlsState\n\t}\n\tauthority := rp.authority\n\tif authority == \"\" {\n\t\tauthority = rp.header.Get(\"Host\")\n\t}\n\tneedsContinue := rp.header.Get(\"Expect\") == \"100-continue\"\n\tif needsContinue {\n\t\trp.header.Del(\"Expect\")\n\t}\n\tbodyOpen := rp.stream.state == stateOpen\n\tbody := &requestBody{\n\t\tconn:          sc,\n\t\tstream:        rp.stream,\n\t\tneedsContinue: needsContinue,\n\t}\n\t// TODO: handle asterisk '*' requests + test\n\turl, err := 
url.ParseRequestURI(rp.path)\n\tif err != nil {\n\t\t// TODO: find the right error code?\n\t\treturn nil, nil, StreamError{rp.stream.id, ErrCodeProtocol}\n\t}\n\treq := &http.Request{\n\t\tMethod:     rp.method,\n\t\tURL:        url,\n\t\tRemoteAddr: sc.remoteAddrStr,\n\t\tHeader:     rp.header,\n\t\tRequestURI: rp.path,\n\t\tProto:      \"HTTP/2.0\",\n\t\tProtoMajor: 2,\n\t\tProtoMinor: 0,\n\t\tTLS:        tlsState,\n\t\tHost:       authority,\n\t\tBody:       body,\n\t}\n\tif bodyOpen {\n\t\tbody.pipe = &pipe{\n\t\t\tb: buffer{buf: make([]byte, initialWindowSize)}, // TODO: share/remove XXX\n\t\t}\n\t\tbody.pipe.c.L = &body.pipe.m\n\n\t\tif vv, ok := rp.header[\"Content-Length\"]; ok {\n\t\t\treq.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)\n\t\t} else {\n\t\t\treq.ContentLength = -1\n\t\t}\n\t}\n\n\trws := responseWriterStatePool.Get().(*responseWriterState)\n\tbwSave := rws.bw\n\t*rws = responseWriterState{} // zero all the fields\n\trws.conn = sc\n\trws.bw = bwSave\n\trws.bw.Reset(chunkWriter{rws})\n\trws.stream = rp.stream\n\trws.req = req\n\trws.body = body\n\trws.frameWriteCh = make(chan error, 1)\n\n\trw := &responseWriter{rws: rws}\n\treturn rw, req, nil\n}\n\n// Run on its own goroutine.\nfunc (sc *serverConn) runHandler(rw *responseWriter, req *http.Request) {\n\tdefer rw.handlerDone()\n\t// TODO: catch panics like net/http.Server\n\tsc.handler.ServeHTTP(rw, req)\n}\n\n// called from handler goroutines.\n// h may be nil.\nfunc (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders, tempCh chan error) {\n\tsc.serveG.checkNotOn() // NOT on\n\tvar errc chan error\n\tif headerData.h != nil {\n\t\t// If there's a header map (which we don't own), so we have to block on\n\t\t// waiting for this frame to be written, so an http.Flush mid-handler\n\t\t// writes out the correct value of keys, before a handler later potentially\n\t\t// mutates it.\n\t\terrc = tempCh\n\t}\n\tsc.writeFrameFromHandler(frameWriteMsg{\n\t\twrite:  
headerData,\n\t\tstream: st,\n\t\tdone:   errc,\n\t})\n\tif errc != nil {\n\t\tselect {\n\t\tcase <-errc:\n\t\t\t// Ignore. Just for synchronization.\n\t\t\t// Any error will be handled in the writing goroutine.\n\t\tcase <-sc.doneServing:\n\t\t\t// Client has closed the connection.\n\t\t}\n\t}\n}\n\n// called from handler goroutines.\nfunc (sc *serverConn) write100ContinueHeaders(st *stream) {\n\tsc.writeFrameFromHandler(frameWriteMsg{\n\t\twrite:  write100ContinueHeadersFrame{st.id},\n\t\tstream: st,\n\t})\n}\n\n// A bodyReadMsg tells the server loop that the http.Handler read n\n// bytes of the DATA from the client on the given stream.\ntype bodyReadMsg struct {\n\tst *stream\n\tn  int\n}\n\n// called from handler goroutines.\n// Notes that the handler for the given stream ID read n bytes of its body\n// and schedules flow control tokens to be sent.\nfunc (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) {\n\tsc.serveG.checkNotOn() // NOT on\n\tsc.bodyReadCh <- bodyReadMsg{st, n}\n}\n\nfunc (sc *serverConn) noteBodyRead(st *stream, n int) {\n\tsc.serveG.check()\n\tsc.sendWindowUpdate(nil, n) // conn-level\n\tif st.state != stateHalfClosedRemote && st.state != stateClosed {\n\t\t// Don't send this WINDOW_UPDATE if the stream is closed\n\t\t// remotely.\n\t\tsc.sendWindowUpdate(st, n)\n\t}\n}\n\n// st may be nil for conn-level\nfunc (sc *serverConn) sendWindowUpdate(st *stream, n int) {\n\tsc.serveG.check()\n\t// \"The legal range for the increment to the flow control\n\t// window is 1 to 2^31-1 (2,147,483,647) octets.\"\n\t// A Go Read call on 64-bit machines could in theory read\n\t// a larger Read than this. 
Very unlikely, but we handle it here\n\t// rather than elsewhere for now.\n\tconst maxUint31 = 1<<31 - 1\n\tfor n >= maxUint31 {\n\t\tsc.sendWindowUpdate32(st, maxUint31)\n\t\tn -= maxUint31\n\t}\n\tsc.sendWindowUpdate32(st, int32(n))\n}\n\n// st may be nil for conn-level\nfunc (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {\n\tsc.serveG.check()\n\tif n == 0 {\n\t\treturn\n\t}\n\tif n < 0 {\n\t\tpanic(\"negative update\")\n\t}\n\tvar streamID uint32\n\tif st != nil {\n\t\tstreamID = st.id\n\t}\n\tsc.writeFrame(frameWriteMsg{\n\t\twrite:  writeWindowUpdate{streamID: streamID, n: uint32(n)},\n\t\tstream: st,\n\t})\n\tvar ok bool\n\tif st == nil {\n\t\tok = sc.inflow.add(n)\n\t} else {\n\t\tok = st.inflow.add(n)\n\t}\n\tif !ok {\n\t\tpanic(\"internal error; sent too many window updates without decrements?\")\n\t}\n}\n\ntype requestBody struct {\n\tstream        *stream\n\tconn          *serverConn\n\tclosed        bool\n\tpipe          *pipe // non-nil if we have a HTTP entity message body\n\tneedsContinue bool  // need to send a 100-continue\n}\n\nfunc (b *requestBody) Close() error {\n\tif b.pipe != nil {\n\t\tb.pipe.Close(errClosedBody)\n\t}\n\tb.closed = true\n\treturn nil\n}\n\nfunc (b *requestBody) Read(p []byte) (n int, err error) {\n\tif b.needsContinue {\n\t\tb.needsContinue = false\n\t\tb.conn.write100ContinueHeaders(b.stream)\n\t}\n\tif b.pipe == nil {\n\t\treturn 0, io.EOF\n\t}\n\tn, err = b.pipe.Read(p)\n\tif n > 0 {\n\t\tb.conn.noteBodyReadFromHandler(b.stream, n)\n\t}\n\treturn\n}\n\n// responseWriter is the http.ResponseWriter implementation.  It's\n// intentionally small (1 pointer wide) to minimize garbage.  
The\n// responseWriterState pointer inside is zeroed at the end of a\n// request (in handlerDone) and calls on the responseWriter thereafter\n// simply crash (caller's mistake), but the much larger responseWriterState\n// and buffers are reused between multiple requests.\ntype responseWriter struct {\n\trws *responseWriterState\n}\n\n// Optional http.ResponseWriter interfaces implemented.\nvar (\n\t_ http.CloseNotifier = (*responseWriter)(nil)\n\t_ http.Flusher       = (*responseWriter)(nil)\n\t_ stringWriter       = (*responseWriter)(nil)\n)\n\ntype responseWriterState struct {\n\t// immutable within a request:\n\tstream *stream\n\treq    *http.Request\n\tbody   *requestBody // to close at end of request, if DATA frames didn't\n\tconn   *serverConn\n\n\t// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc\n\tbw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}\n\n\t// mutated by http.Handler goroutine:\n\thandlerHeader http.Header // nil until called\n\tsnapHeader    http.Header // snapshot of handlerHeader at WriteHeader time\n\tstatus        int         // status code passed to WriteHeader\n\twroteHeader   bool        // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.\n\tsentHeader    bool        // have we sent the header frame?\n\thandlerDone   bool        // handler has finished\n\tcurWrite      writeData\n\tframeWriteCh  chan error // re-used whenever we need to block on a frame being written\n\n\tcloseNotifierMu sync.Mutex // guards closeNotifierCh\n\tcloseNotifierCh chan bool  // nil until first used\n}\n\ntype chunkWriter struct{ rws *responseWriterState }\n\nfunc (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }\n\n// writeChunk writes chunks from the bufio.Writer. 
But because\n// bufio.Writer may bypass its chunking, sometimes p may be\n// arbitrarily large.\n//\n// writeChunk is also responsible (on the first chunk) for sending the\n// HEADER response.\nfunc (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {\n\tif !rws.wroteHeader {\n\t\trws.writeHeader(200)\n\t}\n\tif !rws.sentHeader {\n\t\trws.sentHeader = true\n\t\tvar ctype, clen string // implicit ones, if we can calculate it\n\t\tif rws.handlerDone && rws.snapHeader.Get(\"Content-Length\") == \"\" {\n\t\t\tclen = strconv.Itoa(len(p))\n\t\t}\n\t\tif rws.snapHeader.Get(\"Content-Type\") == \"\" {\n\t\t\tctype = http.DetectContentType(p)\n\t\t}\n\t\tendStream := rws.handlerDone && len(p) == 0\n\t\trws.conn.writeHeaders(rws.stream, &writeResHeaders{\n\t\t\tstreamID:      rws.stream.id,\n\t\t\thttpResCode:   rws.status,\n\t\t\th:             rws.snapHeader,\n\t\t\tendStream:     endStream,\n\t\t\tcontentType:   ctype,\n\t\t\tcontentLength: clen,\n\t\t}, rws.frameWriteCh)\n\t\tif endStream {\n\t\t\treturn 0, nil\n\t\t}\n\t}\n\tif len(p) == 0 && !rws.handlerDone {\n\t\treturn 0, nil\n\t}\n\tcurWrite := &rws.curWrite\n\tcurWrite.streamID = rws.stream.id\n\tcurWrite.p = p\n\tcurWrite.endStream = rws.handlerDone\n\tif err := rws.conn.writeDataFromHandler(rws.stream, curWrite, rws.frameWriteCh); err != nil {\n\t\treturn 0, err\n\t}\n\treturn len(p), nil\n}\n\nfunc (w *responseWriter) Flush() {\n\trws := w.rws\n\tif rws == nil {\n\t\tpanic(\"Header called after Handler finished\")\n\t}\n\tif rws.bw.Buffered() > 0 {\n\t\tif err := rws.bw.Flush(); err != nil {\n\t\t\t// Ignore the error. 
The frame writer already knows.\n\t\t\treturn\n\t\t}\n\t} else {\n\t\t// The bufio.Writer won't call chunkWriter.Write\n\t\t// (writeChunk with zero bytes, so we have to do it\n\t\t// ourselves to force the HTTP response header and/or\n\t\t// final DATA frame (with END_STREAM) to be sent.\n\t\trws.writeChunk(nil)\n\t}\n}\n\nfunc (w *responseWriter) CloseNotify() <-chan bool {\n\trws := w.rws\n\tif rws == nil {\n\t\tpanic(\"CloseNotify called after Handler finished\")\n\t}\n\trws.closeNotifierMu.Lock()\n\tch := rws.closeNotifierCh\n\tif ch == nil {\n\t\tch = make(chan bool, 1)\n\t\trws.closeNotifierCh = ch\n\t\tgo func() {\n\t\t\trws.stream.cw.Wait() // wait for close\n\t\t\tch <- true\n\t\t}()\n\t}\n\trws.closeNotifierMu.Unlock()\n\treturn ch\n}\n\nfunc (w *responseWriter) Header() http.Header {\n\trws := w.rws\n\tif rws == nil {\n\t\tpanic(\"Header called after Handler finished\")\n\t}\n\tif rws.handlerHeader == nil {\n\t\trws.handlerHeader = make(http.Header)\n\t}\n\treturn rws.handlerHeader\n}\n\nfunc (w *responseWriter) WriteHeader(code int) {\n\trws := w.rws\n\tif rws == nil {\n\t\tpanic(\"WriteHeader called after Handler finished\")\n\t}\n\trws.writeHeader(code)\n}\n\nfunc (rws *responseWriterState) writeHeader(code int) {\n\tif !rws.wroteHeader {\n\t\trws.wroteHeader = true\n\t\trws.status = code\n\t\tif len(rws.handlerHeader) > 0 {\n\t\t\trws.snapHeader = cloneHeader(rws.handlerHeader)\n\t\t}\n\t}\n}\n\nfunc cloneHeader(h http.Header) http.Header {\n\th2 := make(http.Header, len(h))\n\tfor k, vv := range h {\n\t\tvv2 := make([]string, len(vv))\n\t\tcopy(vv2, vv)\n\t\th2[k] = vv2\n\t}\n\treturn h2\n}\n\n// The Life Of A Write is like this:\n//\n// * Handler calls w.Write or w.WriteString ->\n// * -> rws.bw (*bufio.Writer) ->\n// * (Handler migth call Flush)\n// * -> chunkWriter{rws}\n// * -> responseWriterState.writeChunk(p []byte)\n// * -> responseWriterState.writeChunk (most of the magic; see comment there)\nfunc (w *responseWriter) Write(p []byte) (n int, 
err error) {\n\treturn w.write(len(p), p, \"\")\n}\n\nfunc (w *responseWriter) WriteString(s string) (n int, err error) {\n\treturn w.write(len(s), nil, s)\n}\n\n// either dataB or dataS is non-zero.\nfunc (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {\n\trws := w.rws\n\tif rws == nil {\n\t\tpanic(\"Write called after Handler finished\")\n\t}\n\tif !rws.wroteHeader {\n\t\tw.WriteHeader(200)\n\t}\n\tif dataB != nil {\n\t\treturn rws.bw.Write(dataB)\n\t} else {\n\t\treturn rws.bw.WriteString(dataS)\n\t}\n}\n\nfunc (w *responseWriter) handlerDone() {\n\trws := w.rws\n\tif rws == nil {\n\t\tpanic(\"handlerDone called twice\")\n\t}\n\trws.handlerDone = true\n\tw.Flush()\n\tw.rws = nil\n\tresponseWriterStatePool.Put(rws)\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/transport.go",
    "content": "// Copyright 2015 The Go Authors.\n// See https://go.googlesource.com/go/+/master/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://go.googlesource.com/go/+/master/LICENSE\n\npackage http2\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com/bradfitz/http2/hpack\"\n)\n\ntype Transport struct {\n\tFallback http.RoundTripper\n\n\t// TODO: remove this and make more general with a TLS dial hook, like http\n\tInsecureTLSDial bool\n\n\tconnMu sync.Mutex\n\tconns  map[string][]*clientConn // key is host:port\n}\n\ntype clientConn struct {\n\tt        *Transport\n\ttconn    *tls.Conn\n\ttlsState *tls.ConnectionState\n\tconnKey  []string // key(s) this connection is cached in, in t.conns\n\n\treaderDone chan struct{} // closed on error\n\treaderErr  error         // set before readerDone is closed\n\thdec       *hpack.Decoder\n\tnextRes    *http.Response\n\n\tmu           sync.Mutex\n\tclosed       bool\n\tgoAway       *GoAwayFrame // if non-nil, the GoAwayFrame we received\n\tstreams      map[uint32]*clientStream\n\tnextStreamID uint32\n\tbw           *bufio.Writer\n\twerr         error // first write error that has occurred\n\tbr           *bufio.Reader\n\tfr           *Framer\n\t// Settings from peer:\n\tmaxFrameSize         uint32\n\tmaxConcurrentStreams uint32\n\tinitialWindowSize    uint32\n\thbuf                 bytes.Buffer // HPACK encoder writes into this\n\thenc                 *hpack.Encoder\n}\n\ntype clientStream struct {\n\tID   uint32\n\tresc chan resAndError\n\tpw   *io.PipeWriter\n\tpr   *io.PipeReader\n}\n\ntype stickyErrWriter struct {\n\tw   io.Writer\n\terr *error\n}\n\nfunc (sew stickyErrWriter) Write(p []byte) (n int, err error) {\n\tif *sew.err != nil {\n\t\treturn 0, *sew.err\n\t}\n\tn, err = sew.w.Write(p)\n\t*sew.err = err\n\treturn\n}\n\nfunc (t *Transport) RoundTrip(req 
*http.Request) (*http.Response, error) {\n\tif req.URL.Scheme != \"https\" {\n\t\tif t.Fallback == nil {\n\t\t\treturn nil, errors.New(\"http2: unsupported scheme and no Fallback\")\n\t\t}\n\t\treturn t.Fallback.RoundTrip(req)\n\t}\n\n\thost, port, err := net.SplitHostPort(req.URL.Host)\n\tif err != nil {\n\t\thost = req.URL.Host\n\t\tport = \"443\"\n\t}\n\n\tfor {\n\t\tcc, err := t.getClientConn(host, port)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tres, err := cc.roundTrip(req)\n\t\tif shouldRetryRequest(err) { // TODO: or clientconn is overloaded (too many outstanding requests)?\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn res, nil\n\t}\n}\n\n// CloseIdleConnections closes any connections which were previously\n// connected from previous requests but are now sitting idle.\n// It does not interrupt any connections currently in use.\nfunc (t *Transport) CloseIdleConnections() {\n\tt.connMu.Lock()\n\tdefer t.connMu.Unlock()\n\tfor _, vv := range t.conns {\n\t\tfor _, cc := range vv {\n\t\t\tcc.closeIfIdle()\n\t\t}\n\t}\n}\n\nvar errClientConnClosed = errors.New(\"http2: client conn is closed\")\n\nfunc shouldRetryRequest(err error) bool {\n\t// TODO: or GOAWAY graceful shutdown stuff\n\treturn err == errClientConnClosed\n}\n\nfunc (t *Transport) removeClientConn(cc *clientConn) {\n\tt.connMu.Lock()\n\tdefer t.connMu.Unlock()\n\tfor _, key := range cc.connKey {\n\t\tvv, ok := t.conns[key]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tnewList := filterOutClientConn(vv, cc)\n\t\tif len(newList) > 0 {\n\t\t\tt.conns[key] = newList\n\t\t} else {\n\t\t\tdelete(t.conns, key)\n\t\t}\n\t}\n}\n\nfunc filterOutClientConn(in []*clientConn, exclude *clientConn) []*clientConn {\n\tout := in[:0]\n\tfor _, v := range in {\n\t\tif v != exclude {\n\t\t\tout = append(out, v)\n\t\t}\n\t}\n\treturn out\n}\n\nfunc (t *Transport) getClientConn(host, port string) (*clientConn, error) {\n\tt.connMu.Lock()\n\tdefer t.connMu.Unlock()\n\n\tkey 
:= net.JoinHostPort(host, port)\n\n\tfor _, cc := range t.conns[key] {\n\t\tif cc.canTakeNewRequest() {\n\t\t\treturn cc, nil\n\t\t}\n\t}\n\tif t.conns == nil {\n\t\tt.conns = make(map[string][]*clientConn)\n\t}\n\tcc, err := t.newClientConn(host, port, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt.conns[key] = append(t.conns[key], cc)\n\treturn cc, nil\n}\n\nfunc (t *Transport) newClientConn(host, port, key string) (*clientConn, error) {\n\tcfg := &tls.Config{\n\t\tServerName:         host,\n\t\tNextProtos:         []string{NextProtoTLS},\n\t\tInsecureSkipVerify: t.InsecureTLSDial,\n\t}\n\ttconn, err := tls.Dial(\"tcp\", host+\":\"+port, cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := tconn.Handshake(); err != nil {\n\t\treturn nil, err\n\t}\n\tif !t.InsecureTLSDial {\n\t\tif err := tconn.VerifyHostname(cfg.ServerName); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tstate := tconn.ConnectionState()\n\tif p := state.NegotiatedProtocol; p != NextProtoTLS {\n\t\t// TODO(bradfitz): fall back to Fallback\n\t\treturn nil, fmt.Errorf(\"bad protocol: %v\", p)\n\t}\n\tif !state.NegotiatedProtocolIsMutual {\n\t\treturn nil, errors.New(\"could not negotiate protocol mutually\")\n\t}\n\tif _, err := tconn.Write(clientPreface); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcc := &clientConn{\n\t\tt:                    t,\n\t\ttconn:                tconn,\n\t\tconnKey:              []string{key}, // TODO: cert's validated hostnames too\n\t\ttlsState:             &state,\n\t\treaderDone:           make(chan struct{}),\n\t\tnextStreamID:         1,\n\t\tmaxFrameSize:         16 << 10, // spec default\n\t\tinitialWindowSize:    65535,    // spec default\n\t\tmaxConcurrentStreams: 1000,     // \"infinite\", per spec. 
1000 seems good enough.\n\t\tstreams:              make(map[uint32]*clientStream),\n\t}\n\tcc.bw = bufio.NewWriter(stickyErrWriter{tconn, &cc.werr})\n\tcc.br = bufio.NewReader(tconn)\n\tcc.fr = NewFramer(cc.bw, cc.br)\n\tcc.henc = hpack.NewEncoder(&cc.hbuf)\n\n\tcc.fr.WriteSettings()\n\t// TODO: re-send more conn-level flow control tokens when server uses all these.\n\tcc.fr.WriteWindowUpdate(0, 1<<30) // um, 0x7fffffff doesn't work to Google? it hangs?\n\tcc.bw.Flush()\n\tif cc.werr != nil {\n\t\treturn nil, cc.werr\n\t}\n\n\t// Read the obligatory SETTINGS frame\n\tf, err := cc.fr.ReadFrame()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsf, ok := f.(*SettingsFrame)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"expected settings frame, got: %T\", f)\n\t}\n\tcc.fr.WriteSettingsAck()\n\tcc.bw.Flush()\n\n\tsf.ForeachSetting(func(s Setting) error {\n\t\tswitch s.ID {\n\t\tcase SettingMaxFrameSize:\n\t\t\tcc.maxFrameSize = s.Val\n\t\tcase SettingMaxConcurrentStreams:\n\t\t\tcc.maxConcurrentStreams = s.Val\n\t\tcase SettingInitialWindowSize:\n\t\t\tcc.initialWindowSize = s.Val\n\t\tdefault:\n\t\t\t// TODO(bradfitz): handle more\n\t\t\tlog.Printf(\"Unhandled Setting: %v\", s)\n\t\t}\n\t\treturn nil\n\t})\n\t// TODO: figure out henc size\n\tcc.hdec = hpack.NewDecoder(initialHeaderTableSize, cc.onNewHeaderField)\n\n\tgo cc.readLoop()\n\treturn cc, nil\n}\n\nfunc (cc *clientConn) setGoAway(f *GoAwayFrame) {\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\tcc.goAway = f\n}\n\nfunc (cc *clientConn) canTakeNewRequest() bool {\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\treturn cc.goAway == nil &&\n\t\tint64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&\n\t\tcc.nextStreamID < 2147483647\n}\n\nfunc (cc *clientConn) closeIfIdle() {\n\tcc.mu.Lock()\n\tif len(cc.streams) > 0 {\n\t\tcc.mu.Unlock()\n\t\treturn\n\t}\n\tcc.closed = true\n\t// TODO: do clients send GOAWAY too? maybe? 
Just Close:\n\tcc.mu.Unlock()\n\n\tcc.tconn.Close()\n}\n\nfunc (cc *clientConn) roundTrip(req *http.Request) (*http.Response, error) {\n\tcc.mu.Lock()\n\n\tif cc.closed {\n\t\tcc.mu.Unlock()\n\t\treturn nil, errClientConnClosed\n\t}\n\n\tcs := cc.newStream()\n\thasBody := false // TODO\n\n\t// we send: HEADERS[+CONTINUATION] + (DATA?)\n\thdrs := cc.encodeHeaders(req)\n\tfirst := true\n\tfor len(hdrs) > 0 {\n\t\tchunk := hdrs\n\t\tif len(chunk) > int(cc.maxFrameSize) {\n\t\t\tchunk = chunk[:cc.maxFrameSize]\n\t\t}\n\t\thdrs = hdrs[len(chunk):]\n\t\tendHeaders := len(hdrs) == 0\n\t\tif first {\n\t\t\tcc.fr.WriteHeaders(HeadersFrameParam{\n\t\t\t\tStreamID:      cs.ID,\n\t\t\t\tBlockFragment: chunk,\n\t\t\t\tEndStream:     !hasBody,\n\t\t\t\tEndHeaders:    endHeaders,\n\t\t\t})\n\t\t\tfirst = false\n\t\t} else {\n\t\t\tcc.fr.WriteContinuation(cs.ID, endHeaders, chunk)\n\t\t}\n\t}\n\tcc.bw.Flush()\n\twerr := cc.werr\n\tcc.mu.Unlock()\n\n\tif hasBody {\n\t\t// TODO: write data. and it should probably be interleaved:\n\t\t//   go ... io.Copy(dataFrameWriter{cc, cs, ...}, req.Body) ... 
etc\n\t}\n\n\tif werr != nil {\n\t\treturn nil, werr\n\t}\n\n\tre := <-cs.resc\n\tif re.err != nil {\n\t\treturn nil, re.err\n\t}\n\tres := re.res\n\tres.Request = req\n\tres.TLS = cc.tlsState\n\treturn res, nil\n}\n\n// requires cc.mu be held.\nfunc (cc *clientConn) encodeHeaders(req *http.Request) []byte {\n\tcc.hbuf.Reset()\n\n\t// TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go\n\thost := req.Host\n\tif host == \"\" {\n\t\thost = req.URL.Host\n\t}\n\n\tpath := req.URL.Path\n\tif path == \"\" {\n\t\tpath = \"/\"\n\t}\n\n\tcc.writeHeader(\":authority\", host) // probably not right for all sites\n\tcc.writeHeader(\":method\", req.Method)\n\tcc.writeHeader(\":path\", path)\n\tcc.writeHeader(\":scheme\", \"https\")\n\n\tfor k, vv := range req.Header {\n\t\tlowKey := strings.ToLower(k)\n\t\tif lowKey == \"host\" {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, v := range vv {\n\t\t\tcc.writeHeader(lowKey, v)\n\t\t}\n\t}\n\treturn cc.hbuf.Bytes()\n}\n\nfunc (cc *clientConn) writeHeader(name, value string) {\n\tlog.Printf(\"sending %q = %q\", name, value)\n\tcc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})\n}\n\ntype resAndError struct {\n\tres *http.Response\n\terr error\n}\n\n// requires cc.mu be held.\nfunc (cc *clientConn) newStream() *clientStream {\n\tcs := &clientStream{\n\t\tID:   cc.nextStreamID,\n\t\tresc: make(chan resAndError, 1),\n\t}\n\tcc.nextStreamID += 2\n\tcc.streams[cs.ID] = cs\n\treturn cs\n}\n\nfunc (cc *clientConn) streamByID(id uint32, andRemove bool) *clientStream {\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\tcs := cc.streams[id]\n\tif andRemove {\n\t\tdelete(cc.streams, id)\n\t}\n\treturn cs\n}\n\n// runs in its own goroutine.\nfunc (cc *clientConn) readLoop() {\n\tdefer cc.t.removeClientConn(cc)\n\tdefer close(cc.readerDone)\n\n\tactiveRes := map[uint32]*clientStream{} // keyed by streamID\n\t// Close any response bodies if the server closes prematurely.\n\t// TODO: also do this if we've written the headers but 
not\n\t// gotten a response yet.\n\tdefer func() {\n\t\terr := cc.readerErr\n\t\tif err == io.EOF {\n\t\t\terr = io.ErrUnexpectedEOF\n\t\t}\n\t\tfor _, cs := range activeRes {\n\t\t\tcs.pw.CloseWithError(err)\n\t\t}\n\t}()\n\n\t// continueStreamID is the stream ID we're waiting for\n\t// continuation frames for.\n\tvar continueStreamID uint32\n\n\tfor {\n\t\tf, err := cc.fr.ReadFrame()\n\t\tif err != nil {\n\t\t\tcc.readerErr = err\n\t\t\treturn\n\t\t}\n\t\tlog.Printf(\"Transport received %v: %#v\", f.Header(), f)\n\n\t\tstreamID := f.Header().StreamID\n\n\t\t_, isContinue := f.(*ContinuationFrame)\n\t\tif isContinue {\n\t\t\tif streamID != continueStreamID {\n\t\t\t\tlog.Printf(\"Protocol violation: got CONTINUATION with id %d; want %d\", streamID, continueStreamID)\n\t\t\t\tcc.readerErr = ConnectionError(ErrCodeProtocol)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else if continueStreamID != 0 {\n\t\t\t// Continue frames need to be adjacent in the stream\n\t\t\t// and we were in the middle of headers.\n\t\t\tlog.Printf(\"Protocol violation: got %T for stream %d, want CONTINUATION for %d\", f, streamID, continueStreamID)\n\t\t\tcc.readerErr = ConnectionError(ErrCodeProtocol)\n\t\t\treturn\n\t\t}\n\n\t\tif streamID%2 == 0 {\n\t\t\t// Ignore streams pushed from the server for now.\n\t\t\t// These always have an even stream id.\n\t\t\tcontinue\n\t\t}\n\t\tstreamEnded := false\n\t\tif ff, ok := f.(streamEnder); ok {\n\t\t\tstreamEnded = ff.StreamEnded()\n\t\t}\n\n\t\tcs := cc.streamByID(streamID, streamEnded)\n\t\tif cs == nil {\n\t\t\tlog.Printf(\"Received frame for untracked stream ID %d\", streamID)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch f := f.(type) {\n\t\tcase *HeadersFrame:\n\t\t\tcc.nextRes = &http.Response{\n\t\t\t\tProto:      \"HTTP/2.0\",\n\t\t\t\tProtoMajor: 2,\n\t\t\t\tHeader:     make(http.Header),\n\t\t\t}\n\t\t\tcs.pr, cs.pw = io.Pipe()\n\t\t\tcc.hdec.Write(f.HeaderBlockFragment())\n\t\tcase 
*ContinuationFrame:\n\t\t\tcc.hdec.Write(f.HeaderBlockFragment())\n\t\tcase *DataFrame:\n\t\t\tlog.Printf(\"DATA: %q\", f.Data())\n\t\t\tcs.pw.Write(f.Data())\n\t\tcase *GoAwayFrame:\n\t\t\tcc.t.removeClientConn(cc)\n\t\t\tif f.ErrCode != 0 {\n\t\t\t\t// TODO: deal with GOAWAY more. particularly the error code\n\t\t\t\tlog.Printf(\"transport got GOAWAY with error code = %v\", f.ErrCode)\n\t\t\t}\n\t\t\tcc.setGoAway(f)\n\t\tdefault:\n\t\t\tlog.Printf(\"Transport: unhandled response frame type %T\", f)\n\t\t}\n\t\theadersEnded := false\n\t\tif he, ok := f.(headersEnder); ok {\n\t\t\theadersEnded = he.HeadersEnded()\n\t\t\tif headersEnded {\n\t\t\t\tcontinueStreamID = 0\n\t\t\t} else {\n\t\t\t\tcontinueStreamID = streamID\n\t\t\t}\n\t\t}\n\n\t\tif streamEnded {\n\t\t\tcs.pw.Close()\n\t\t\tdelete(activeRes, streamID)\n\t\t}\n\t\tif headersEnded {\n\t\t\tif cs == nil {\n\t\t\t\tpanic(\"couldn't find stream\") // TODO be graceful\n\t\t\t}\n\t\t\t// TODO: set the Body to one which notes the\n\t\t\t// Close and also sends the server a\n\t\t\t// RST_STREAM\n\t\t\tcc.nextRes.Body = cs.pr\n\t\t\tres := cc.nextRes\n\t\t\tactiveRes[streamID] = cs\n\t\t\tcs.resc <- resAndError{res: res}\n\t\t}\n\t}\n}\n\nfunc (cc *clientConn) onNewHeaderField(f hpack.HeaderField) {\n\t// TODO: verifiy pseudo headers come before non-pseudo headers\n\t// TODO: verifiy the status is set\n\tlog.Printf(\"Header field: %+v\", f)\n\tif f.Name == \":status\" {\n\t\tcode, err := strconv.Atoi(f.Value)\n\t\tif err != nil {\n\t\t\tpanic(\"TODO: be graceful\")\n\t\t}\n\t\tcc.nextRes.Status = f.Value + \" \" + http.StatusText(code)\n\t\tcc.nextRes.StatusCode = code\n\t\treturn\n\t}\n\tif strings.HasPrefix(f.Name, \":\") {\n\t\t// \"Endpoints MUST NOT generate pseudo-header fields other than those defined in this document.\"\n\t\t// TODO: treat as invalid?\n\t\treturn\n\t}\n\tcc.nextRes.Header.Add(http.CanonicalHeaderKey(f.Name), f.Value)\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/write.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\npackage http2\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"github.com/bradfitz/http2/hpack\"\n)\n\n// writeFramer is implemented by any type that is used to write frames.\ntype writeFramer interface {\n\twriteFrame(writeContext) error\n}\n\n// writeContext is the interface needed by the various frame writer\n// types below. All the writeFrame methods below are scheduled via the\n// frame writing scheduler (see writeScheduler in writesched.go).\n//\n// This interface is implemented by *serverConn.\n// TODO: use it from the client code too, once it exists.\ntype writeContext interface {\n\tFramer() *Framer\n\tFlush() error\n\tCloseConn() error\n\t// HeaderEncoder returns an HPACK encoder that writes to the\n\t// returned buffer.\n\tHeaderEncoder() (*hpack.Encoder, *bytes.Buffer)\n}\n\n// endsStream reports whether the given frame writer w will locally\n// close the stream.\nfunc endsStream(w writeFramer) bool {\n\tswitch v := w.(type) {\n\tcase *writeData:\n\t\treturn v.endStream\n\tcase *writeResHeaders:\n\t\treturn v.endStream\n\t}\n\treturn false\n}\n\ntype flushFrameWriter struct{}\n\nfunc (flushFrameWriter) writeFrame(ctx writeContext) error {\n\treturn ctx.Flush()\n}\n\ntype writeSettings []Setting\n\nfunc (s writeSettings) writeFrame(ctx writeContext) error {\n\treturn ctx.Framer().WriteSettings([]Setting(s)...)\n}\n\ntype writeGoAway struct {\n\tmaxStreamID uint32\n\tcode        ErrCode\n}\n\nfunc (p *writeGoAway) writeFrame(ctx writeContext) error {\n\terr := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)\n\tif p.code != 0 {\n\t\tctx.Flush() // ignore error: we're hanging up on them 
anyway\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\tctx.CloseConn()\n\t}\n\treturn err\n}\n\ntype writeData struct {\n\tstreamID  uint32\n\tp         []byte\n\tendStream bool\n}\n\nfunc (w *writeData) String() string {\n\treturn fmt.Sprintf(\"writeData(stream=%d, p=%d, endStream=%v)\", w.streamID, len(w.p), w.endStream)\n}\n\nfunc (w *writeData) writeFrame(ctx writeContext) error {\n\treturn ctx.Framer().WriteData(w.streamID, w.endStream, w.p)\n}\n\nfunc (se StreamError) writeFrame(ctx writeContext) error {\n\treturn ctx.Framer().WriteRSTStream(se.StreamID, se.Code)\n}\n\ntype writePingAck struct{ pf *PingFrame }\n\nfunc (w writePingAck) writeFrame(ctx writeContext) error {\n\treturn ctx.Framer().WritePing(true, w.pf.Data)\n}\n\ntype writeSettingsAck struct{}\n\nfunc (writeSettingsAck) writeFrame(ctx writeContext) error {\n\treturn ctx.Framer().WriteSettingsAck()\n}\n\n// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames\n// for HTTP response headers from a server handler.\ntype writeResHeaders struct {\n\tstreamID    uint32\n\thttpResCode int\n\th           http.Header // may be nil\n\tendStream   bool\n\n\tcontentType   string\n\tcontentLength string\n}\n\nfunc (w *writeResHeaders) writeFrame(ctx writeContext) error {\n\tenc, buf := ctx.HeaderEncoder()\n\tbuf.Reset()\n\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: httpCodeString(w.httpResCode)})\n\tfor k, vv := range w.h {\n\t\tk = lowerHeader(k)\n\t\tfor _, v := range vv {\n\t\t\t// TODO: more of \"8.1.2.2 Connection-Specific Header Fields\"\n\t\t\tif k == \"transfer-encoding\" && v != \"trailers\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tenc.WriteField(hpack.HeaderField{Name: k, Value: v})\n\t\t}\n\t}\n\tif w.contentType != \"\" {\n\t\tenc.WriteField(hpack.HeaderField{Name: \"content-type\", Value: w.contentType})\n\t}\n\tif w.contentLength != \"\" {\n\t\tenc.WriteField(hpack.HeaderField{Name: \"content-length\", Value: w.contentLength})\n\t}\n\n\theaderBlock := 
buf.Bytes()\n\tif len(headerBlock) == 0 {\n\t\tpanic(\"unexpected empty hpack\")\n\t}\n\n\t// For now we're lazy and just pick the minimum MAX_FRAME_SIZE\n\t// that all peers must support (16KB). Later we could care\n\t// more and send larger frames if the peer advertised it, but\n\t// there's little point. Most headers are small anyway (so we\n\t// generally won't have CONTINUATION frames), and extra frames\n\t// only waste 9 bytes anyway.\n\tconst maxFrameSize = 16384\n\n\tfirst := true\n\tfor len(headerBlock) > 0 {\n\t\tfrag := headerBlock\n\t\tif len(frag) > maxFrameSize {\n\t\t\tfrag = frag[:maxFrameSize]\n\t\t}\n\t\theaderBlock = headerBlock[len(frag):]\n\t\tendHeaders := len(headerBlock) == 0\n\t\tvar err error\n\t\tif first {\n\t\t\tfirst = false\n\t\t\terr = ctx.Framer().WriteHeaders(HeadersFrameParam{\n\t\t\t\tStreamID:      w.streamID,\n\t\t\t\tBlockFragment: frag,\n\t\t\t\tEndStream:     w.endStream,\n\t\t\t\tEndHeaders:    endHeaders,\n\t\t\t})\n\t\t} else {\n\t\t\terr = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\ntype write100ContinueHeadersFrame struct {\n\tstreamID uint32\n}\n\nfunc (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {\n\tenc, buf := ctx.HeaderEncoder()\n\tbuf.Reset()\n\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"100\"})\n\treturn ctx.Framer().WriteHeaders(HeadersFrameParam{\n\t\tStreamID:      w.streamID,\n\t\tBlockFragment: buf.Bytes(),\n\t\tEndStream:     false,\n\t\tEndHeaders:    true,\n\t})\n}\n\ntype writeWindowUpdate struct {\n\tstreamID uint32 // or 0 for conn-level\n\tn        uint32\n}\n\nfunc (wu writeWindowUpdate) writeFrame(ctx writeContext) error {\n\treturn ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)\n}\n"
  },
  {
    "path": "vendor/github.com/bradfitz/http2/writesched.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n// See https://code.google.com/p/go/source/browse/CONTRIBUTORS\n// Licensed under the same terms as Go itself:\n// https://code.google.com/p/go/source/browse/LICENSE\n\npackage http2\n\nimport \"fmt\"\n\n// frameWriteMsg is a request to write a frame.\ntype frameWriteMsg struct {\n\t// write is the interface value that does the writing, once the\n\t// writeScheduler (below) has decided to select this frame\n\t// to write. The write functions are all defined in write.go.\n\twrite writeFramer\n\n\tstream *stream // used for prioritization. nil for non-stream frames.\n\n\t// done, if non-nil, must be a buffered channel with space for\n\t// 1 message and is sent the return value from write (or an\n\t// earlier error) when the frame has been written.\n\tdone chan error\n}\n\n// for debugging only:\nfunc (wm frameWriteMsg) String() string {\n\tvar streamID uint32\n\tif wm.stream != nil {\n\t\tstreamID = wm.stream.id\n\t}\n\tvar des string\n\tif s, ok := wm.write.(fmt.Stringer); ok {\n\t\tdes = s.String()\n\t} else {\n\t\tdes = fmt.Sprintf(\"%T\", wm.write)\n\t}\n\treturn fmt.Sprintf(\"[frameWriteMsg stream=%d, ch=%v, type: %v]\", streamID, wm.done != nil, des)\n}\n\n// writeScheduler tracks pending frames to write, priorities, and decides\n// the next one to use. It is not thread-safe.\ntype writeScheduler struct {\n\t// zero are frames not associated with a specific stream.\n\t// They're sent before any stream-specific freams.\n\tzero writeQueue\n\n\t// maxFrameSize is the maximum size of a DATA frame\n\t// we'll write. 
Must be non-zero and between 16K-16M.\n\tmaxFrameSize uint32\n\n\t// sq contains the stream-specific queues, keyed by stream ID.\n\t// when a stream is idle, it's deleted from the map.\n\tsq map[uint32]*writeQueue\n\n\t// canSend is a slice of memory that's reused between frame\n\t// scheduling decisions to hold the list of writeQueues (from sq)\n\t// which have enough flow control data to send. After canSend is\n\t// built, the best is selected.\n\tcanSend []*writeQueue\n\n\t// pool of empty queues for reuse.\n\tqueuePool []*writeQueue\n}\n\nfunc (ws *writeScheduler) putEmptyQueue(q *writeQueue) {\n\tif len(q.s) != 0 {\n\t\tpanic(\"queue must be empty\")\n\t}\n\tws.queuePool = append(ws.queuePool, q)\n}\n\nfunc (ws *writeScheduler) getEmptyQueue() *writeQueue {\n\tln := len(ws.queuePool)\n\tif ln == 0 {\n\t\treturn new(writeQueue)\n\t}\n\tq := ws.queuePool[ln-1]\n\tws.queuePool = ws.queuePool[:ln-1]\n\treturn q\n}\n\nfunc (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }\n\nfunc (ws *writeScheduler) add(wm frameWriteMsg) {\n\tst := wm.stream\n\tif st == nil {\n\t\tws.zero.push(wm)\n\t} else {\n\t\tws.streamQueue(st.id).push(wm)\n\t}\n}\n\nfunc (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {\n\tif q, ok := ws.sq[streamID]; ok {\n\t\treturn q\n\t}\n\tif ws.sq == nil {\n\t\tws.sq = make(map[uint32]*writeQueue)\n\t}\n\tq := ws.getEmptyQueue()\n\tws.sq[streamID] = q\n\treturn q\n}\n\n// take returns the most important frame to write and removes it from the scheduler.\n// It is illegal to call this if the scheduler is empty or if there are no connection-level\n// flow control bytes available.\nfunc (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {\n\tif ws.maxFrameSize == 0 {\n\t\tpanic(\"internal error: ws.maxFrameSize not initialized or invalid\")\n\t}\n\n\t// If there any frames not associated with streams, prefer those first.\n\t// These are usually SETTINGS, etc.\n\tif !ws.zero.empty() {\n\t\treturn 
ws.zero.shift(), true\n\t}\n\tif len(ws.sq) == 0 {\n\t\treturn\n\t}\n\n\t// Next, prioritize frames on streams that aren't DATA frames (no cost).\n\tfor id, q := range ws.sq {\n\t\tif q.firstIsNoCost() {\n\t\t\treturn ws.takeFrom(id, q)\n\t\t}\n\t}\n\n\t// Now, all that remains are DATA frames with non-zero bytes to\n\t// send. So pick the best one.\n\tif len(ws.canSend) != 0 {\n\t\tpanic(\"should be empty\")\n\t}\n\tfor _, q := range ws.sq {\n\t\tif n := ws.streamWritableBytes(q); n > 0 {\n\t\t\tws.canSend = append(ws.canSend, q)\n\t\t}\n\t}\n\tif len(ws.canSend) == 0 {\n\t\treturn\n\t}\n\tdefer ws.zeroCanSend()\n\n\t// TODO: find the best queue\n\tq := ws.canSend[0]\n\n\treturn ws.takeFrom(q.streamID(), q)\n}\n\n// zeroCanSend is defered from take.\nfunc (ws *writeScheduler) zeroCanSend() {\n\tfor i := range ws.canSend {\n\t\tws.canSend[i] = nil\n\t}\n\tws.canSend = ws.canSend[:0]\n}\n\n// streamWritableBytes returns the number of DATA bytes we could write\n// from the given queue's stream, if this stream/queue were\n// selected. It is an error to call this if q's head isn't a\n// *writeData.\nfunc (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {\n\twm := q.head()\n\tret := wm.stream.flow.available() // max we can write\n\tif ret == 0 {\n\t\treturn 0\n\t}\n\tif int32(ws.maxFrameSize) < ret {\n\t\tret = int32(ws.maxFrameSize)\n\t}\n\tif ret == 0 {\n\t\tpanic(\"internal error: ws.maxFrameSize not initialized or invalid\")\n\t}\n\twd := wm.write.(*writeData)\n\tif len(wd.p) < int(ret) {\n\t\tret = int32(len(wd.p))\n\t}\n\treturn ret\n}\n\nfunc (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {\n\twm = q.head()\n\t// If the first item in this queue costs flow control tokens\n\t// and we don't have enough, write as much as we can.\n\tif wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {\n\t\tallowed := wm.stream.flow.available() // max we can write\n\t\tif allowed == 0 {\n\t\t\t// No quota available. 
Caller can try the next stream.\n\t\t\treturn frameWriteMsg{}, false\n\t\t}\n\t\tif int32(ws.maxFrameSize) < allowed {\n\t\t\tallowed = int32(ws.maxFrameSize)\n\t\t}\n\t\t// TODO: further restrict the allowed size, because even if\n\t\t// the peer says it's okay to write 16MB data frames, we might\n\t\t// want to write smaller ones to properly weight competing\n\t\t// streams' priorities.\n\n\t\tif len(wd.p) > int(allowed) {\n\t\t\twm.stream.flow.take(allowed)\n\t\t\tchunk := wd.p[:allowed]\n\t\t\twd.p = wd.p[allowed:]\n\t\t\t// Make up a new write message of a valid size, rather\n\t\t\t// than shifting one off the queue.\n\t\t\treturn frameWriteMsg{\n\t\t\t\tstream: wm.stream,\n\t\t\t\twrite: &writeData{\n\t\t\t\t\tstreamID: wd.streamID,\n\t\t\t\t\tp:        chunk,\n\t\t\t\t\t// even if the original had endStream set, there\n\t\t\t\t\t// arebytes remaining because len(wd.p) > allowed,\n\t\t\t\t\t// so we know endStream is false:\n\t\t\t\t\tendStream: false,\n\t\t\t\t},\n\t\t\t\t// our caller is blocking on the final DATA frame, not\n\t\t\t\t// these intermediates, so no need to wait:\n\t\t\t\tdone: nil,\n\t\t\t}, true\n\t\t}\n\t\twm.stream.flow.take(int32(len(wd.p)))\n\t}\n\n\tq.shift()\n\tif q.empty() {\n\t\tws.putEmptyQueue(q)\n\t\tdelete(ws.sq, id)\n\t}\n\treturn wm, true\n}\n\nfunc (ws *writeScheduler) forgetStream(id uint32) {\n\tq, ok := ws.sq[id]\n\tif !ok {\n\t\treturn\n\t}\n\tdelete(ws.sq, id)\n\n\t// But keep it for others later.\n\tfor i := range q.s {\n\t\tq.s[i] = frameWriteMsg{}\n\t}\n\tq.s = q.s[:0]\n\tws.putEmptyQueue(q)\n}\n\ntype writeQueue struct {\n\ts []frameWriteMsg\n}\n\n// streamID returns the stream ID for a non-empty stream-specific queue.\nfunc (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }\n\nfunc (q *writeQueue) empty() bool { return len(q.s) == 0 }\n\nfunc (q *writeQueue) push(wm frameWriteMsg) {\n\tq.s = append(q.s, wm)\n}\n\n// head returns the next item that would be removed by shift.\nfunc (q *writeQueue) head() 
frameWriteMsg {\n\tif len(q.s) == 0 {\n\t\tpanic(\"invalid use of queue\")\n\t}\n\treturn q.s[0]\n}\n\nfunc (q *writeQueue) shift() frameWriteMsg {\n\tif len(q.s) == 0 {\n\t\tpanic(\"invalid use of queue\")\n\t}\n\twm := q.s[0]\n\t// TODO: less copy-happy queue.\n\tcopy(q.s, q.s[1:])\n\tq.s[len(q.s)-1] = frameWriteMsg{}\n\tq.s = q.s[:len(q.s)-1]\n\treturn wm\n}\n\nfunc (q *writeQueue) firstIsNoCost() bool {\n\tif df, ok := q.s[0].write.(*writeData); ok {\n\t\treturn len(df.p) == 0\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "vendor/github.com/golang/glog/LICENSE",
    "content": "Apache License\nVersion 2.0, January 2004\nhttp://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n\"License\" shall mean the terms and conditions for use, reproduction, and\ndistribution as defined by Sections 1 through 9 of this document.\n\n\"Licensor\" shall mean the copyright owner or entity authorized by the copyright\nowner that is granting the License.\n\n\"Legal Entity\" shall mean the union of the acting entity and all other entities\nthat control, are controlled by, or are under common control with that entity.\nFor the purposes of this definition, \"control\" means (i) the power, direct or\nindirect, to cause the direction or management of such entity, whether by\ncontract or otherwise, or (ii) ownership of fifty percent (50%) or more of the\noutstanding shares, or (iii) beneficial ownership of such entity.\n\n\"You\" (or \"Your\") shall mean an individual or Legal Entity exercising\npermissions granted by this License.\n\n\"Source\" form shall mean the preferred form for making modifications, including\nbut not limited to software source code, documentation source, and configuration\nfiles.\n\n\"Object\" form shall mean any form resulting from mechanical transformation or\ntranslation of a Source form, including but not limited to compiled object code,\ngenerated documentation, and conversions to other media types.\n\n\"Work\" shall mean the work of authorship, whether in Source or Object form, made\navailable under the License, as indicated by a copyright notice that is included\nin or attached to the work (an example is provided in the Appendix below).\n\n\"Derivative Works\" shall mean any work, whether in Source or Object form, that\nis based on (or derived from) the Work and for which the editorial revisions,\nannotations, elaborations, or other modifications represent, as a whole, an\noriginal work of authorship. 
For the purposes of this License, Derivative Works\nshall not include works that remain separable from, or merely link (or bind by\nname) to the interfaces of, the Work and Derivative Works thereof.\n\n\"Contribution\" shall mean any work of authorship, including the original version\nof the Work and any modifications or additions to that Work or Derivative Works\nthereof, that is intentionally submitted to Licensor for inclusion in the Work\nby the copyright owner or by an individual or Legal Entity authorized to submit\non behalf of the copyright owner. For the purposes of this definition,\n\"submitted\" means any form of electronic, verbal, or written communication sent\nto the Licensor or its representatives, including but not limited to\ncommunication on electronic mailing lists, source code control systems, and\nissue tracking systems that are managed by, or on behalf of, the Licensor for\nthe purpose of discussing and improving the Work, but excluding communication\nthat is conspicuously marked or otherwise designated in writing by the copyright\nowner as \"Not a Contribution.\"\n\n\"Contributor\" shall mean Licensor and any individual or Legal Entity on behalf\nof whom a Contribution has been received by Licensor and subsequently\nincorporated within the Work.\n\n2. Grant of Copyright License.\n\nSubject to the terms and conditions of this License, each Contributor hereby\ngrants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,\nirrevocable copyright license to reproduce, prepare Derivative Works of,\npublicly display, publicly perform, sublicense, and distribute the Work and such\nDerivative Works in Source or Object form.\n\n3. 
Grant of Patent License.\n\nSubject to the terms and conditions of this License, each Contributor hereby\ngrants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,\nirrevocable (except as stated in this section) patent license to make, have\nmade, use, offer to sell, sell, import, and otherwise transfer the Work, where\nsuch license applies only to those patent claims licensable by such Contributor\nthat are necessarily infringed by their Contribution(s) alone or by combination\nof their Contribution(s) with the Work to which such Contribution(s) was\nsubmitted. If You institute patent litigation against any entity (including a\ncross-claim or counterclaim in a lawsuit) alleging that the Work or a\nContribution incorporated within the Work constitutes direct or contributory\npatent infringement, then any patent licenses granted to You under this License\nfor that Work shall terminate as of the date such litigation is filed.\n\n4. Redistribution.\n\nYou may reproduce and distribute copies of the Work or Derivative Works thereof\nin any medium, with or without modifications, and in Source or Object form,\nprovided that You meet the following conditions:\n\nYou must give any other recipients of the Work or Derivative Works a copy of\nthis License; and\nYou must cause any modified files to carry prominent notices stating that You\nchanged the files; and\nYou must retain, in the Source form of any Derivative Works that You distribute,\nall copyright, patent, trademark, and attribution notices from the Source form\nof the Work, excluding those notices that do not pertain to any part of the\nDerivative Works; and\nIf the Work includes a \"NOTICE\" text file as part of its distribution, then any\nDerivative Works that You distribute must include a readable copy of the\nattribution notices contained within such NOTICE file, excluding those notices\nthat do not pertain to any part of the Derivative Works, in at least one of the\nfollowing places: within a 
NOTICE text file distributed as part of the\nDerivative Works; within the Source form or documentation, if provided along\nwith the Derivative Works; or, within a display generated by the Derivative\nWorks, if and wherever such third-party notices normally appear. The contents of\nthe NOTICE file are for informational purposes only and do not modify the\nLicense. You may add Your own attribution notices within Derivative Works that\nYou distribute, alongside or as an addendum to the NOTICE text from the Work,\nprovided that such additional attribution notices cannot be construed as\nmodifying the License.\nYou may add Your own copyright statement to Your modifications and may provide\nadditional or different license terms and conditions for use, reproduction, or\ndistribution of Your modifications, or for any such Derivative Works as a whole,\nprovided Your use, reproduction, and distribution of the Work otherwise complies\nwith the conditions stated in this License.\n\n5. Submission of Contributions.\n\nUnless You explicitly state otherwise, any Contribution intentionally submitted\nfor inclusion in the Work by You to the Licensor shall be under the terms and\nconditions of this License, without any additional terms or conditions.\nNotwithstanding the above, nothing herein shall supersede or modify the terms of\nany separate license agreement you may have executed with Licensor regarding\nsuch Contributions.\n\n6. Trademarks.\n\nThis License does not grant permission to use the trade names, trademarks,\nservice marks, or product names of the Licensor, except as required for\nreasonable and customary use in describing the origin of the Work and\nreproducing the content of the NOTICE file.\n\n7. 
Disclaimer of Warranty.\n\nUnless required by applicable law or agreed to in writing, Licensor provides the\nWork (and each Contributor provides its Contributions) on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,\nincluding, without limitation, any warranties or conditions of TITLE,\nNON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are\nsolely responsible for determining the appropriateness of using or\nredistributing the Work and assume any risks associated with Your exercise of\npermissions under this License.\n\n8. Limitation of Liability.\n\nIn no event and under no legal theory, whether in tort (including negligence),\ncontract, or otherwise, unless required by applicable law (such as deliberate\nand grossly negligent acts) or agreed to in writing, shall any Contributor be\nliable to You for damages, including any direct, indirect, special, incidental,\nor consequential damages of any character arising as a result of this License or\nout of the use or inability to use the Work (including but not limited to\ndamages for loss of goodwill, work stoppage, computer failure or malfunction, or\nany and all other commercial damages or losses), even if such Contributor has\nbeen advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability.\n\nWhile redistributing the Work or Derivative Works thereof, You may choose to\noffer, and charge a fee for, acceptance of support, warranty, indemnity, or\nother liability obligations and/or rights consistent with this License. 
However,\nin accepting such obligations, You may act only on Your own behalf and on Your\nsole responsibility, not on behalf of any other Contributor, and only if You\nagree to indemnify, defend, and hold each Contributor harmless for any liability\nincurred by, or claims asserted against, such Contributor by reason of your\naccepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work\n\nTo apply the Apache License to your work, attach the following boilerplate\nnotice, with the fields enclosed by brackets \"[]\" replaced with your own\nidentifying information. (Don't include the brackets!) The text should be\nenclosed in the appropriate comment syntax for the file format. We also\nrecommend that a file or class name and description of purpose be included on\nthe same \"printed page\" as the copyright notice for easier identification within\nthird-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "vendor/github.com/golang/glog/README",
    "content": "glog\n====\n\nLeveled execution logs for Go.\n\nThis is an efficient pure Go implementation of leveled logs in the\nmanner of the open source C++ package\n\thttp://code.google.com/p/google-glog\n\nBy binding methods to booleans it is possible to use the log package\nwithout paying the expense of evaluating the arguments to the log.\nThrough the -vmodule flag, the package also provides fine-grained\ncontrol over logging at the file level.\n\nThe comment from glog.go introduces the ideas:\n\n\tPackage glog implements logging analogous to the Google-internal\n\tC++ INFO/ERROR/V setup.  It provides functions Info, Warning,\n\tError, Fatal, plus formatting variants such as Infof. It\n\talso provides V-style logging controlled by the -v and\n\t-vmodule=file=2 flags.\n\t\n\tBasic examples:\n\t\n\t\tglog.Info(\"Prepare to repel boarders\")\n\t\n\t\tglog.Fatalf(\"Initialization failed: %s\", err)\n\t\n\tSee the documentation for the V function for an explanation\n\tof these examples:\n\t\n\t\tif glog.V(2) {\n\t\t\tglog.Info(\"Starting transaction...\")\n\t\t}\n\t\n\t\tglog.V(2).Infoln(\"Processed\", nItems, \"elements\")\n\n\nThe repository contains an open source version of the log package\nused inside Google. The master copy of the source lives inside\nGoogle, not here. The code in this repo is for export only and is not itself\nunder development. Feature requests will be ignored.\n\nSend bug reports to golang-nuts@googlegroups.com.\n"
  },
  {
    "path": "vendor/github.com/golang/glog/glog.go",
    "content": "// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/\n//\n// Copyright 2013 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.\n// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as\n// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.\n//\n// Basic examples:\n//\n//\tglog.Info(\"Prepare to repel boarders\")\n//\n//\tglog.Fatalf(\"Initialization failed: %s\", err)\n//\n// See the documentation for the V function for an explanation of these examples:\n//\n//\tif glog.V(2) {\n//\t\tglog.Info(\"Starting transaction...\")\n//\t}\n//\n//\tglog.V(2).Infoln(\"Processed\", nItems, \"elements\")\n//\n// Log output is buffered and written periodically using Flush. 
Programs\n// should call Flush before exiting to guarantee all log output is written.\n//\n// By default, all log statements write to files in a temporary directory.\n// This package provides several flags that modify this behavior.\n// As a result, flag.Parse must be called before any logging is done.\n//\n//\t-logtostderr=false\n//\t\tLogs are written to standard error instead of to files.\n//\t-alsologtostderr=false\n//\t\tLogs are written to standard error as well as to files.\n//\t-stderrthreshold=ERROR\n//\t\tLog events at or above this severity are logged to standard\n//\t\terror as well as to files.\n//\t-log_dir=\"\"\n//\t\tLog files will be written to this directory instead of the\n//\t\tdefault temporary directory.\n//\n//\tOther flags provide aids to debugging.\n//\n//\t-log_backtrace_at=\"\"\n//\t\tWhen set to a file and line number holding a logging statement,\n//\t\tsuch as\n//\t\t\t-log_backtrace_at=gopherflakes.go:234\n//\t\ta stack trace will be written to the Info log whenever execution\n//\t\thits that statement. (Unlike with -vmodule, the \".go\" must be\n//\t\tpresent.)\n//\t-v=0\n//\t\tEnable V-leveled logging at the specified level.\n//\t-vmodule=\"\"\n//\t\tThe syntax of the argument is a comma-separated list of pattern=N,\n//\t\twhere pattern is a literal file name (minus the \".go\" suffix) or\n//\t\t\"glob\" pattern and N is a V level. For instance,\n//\t\t\t-vmodule=gopher*=3\n//\t\tsets the V level to 3 in all Go files whose names begin \"gopher\".\n//\npackage glog\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\tstdLog \"log\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n)\n\n// severity identifies the sort of log: info, warning etc. It also implements\n// the flag.Value interface. The -stderrthreshold flag is of type severity and\n// should be modified only through the flag.Value interface. 
The values match\n// the corresponding constants in C++.\ntype severity int32 // sync/atomic int32\n\n// These constants identify the log levels in order of increasing severity.\n// A message written to a high-severity log file is also written to each\n// lower-severity log file.\nconst (\n\tinfoLog severity = iota\n\twarningLog\n\terrorLog\n\tfatalLog\n\tnumSeverity = 4\n)\n\nconst severityChar = \"IWEF\"\n\nvar severityName = []string{\n\tinfoLog:    \"INFO\",\n\twarningLog: \"WARNING\",\n\terrorLog:   \"ERROR\",\n\tfatalLog:   \"FATAL\",\n}\n\n// get returns the value of the severity.\nfunc (s *severity) get() severity {\n\treturn severity(atomic.LoadInt32((*int32)(s)))\n}\n\n// set sets the value of the severity.\nfunc (s *severity) set(val severity) {\n\tatomic.StoreInt32((*int32)(s), int32(val))\n}\n\n// String is part of the flag.Value interface.\nfunc (s *severity) String() string {\n\treturn strconv.FormatInt(int64(*s), 10)\n}\n\n// Get is part of the flag.Value interface.\nfunc (s *severity) Get() interface{} {\n\treturn *s\n}\n\n// Set is part of the flag.Value interface.\nfunc (s *severity) Set(value string) error {\n\tvar threshold severity\n\t// Is it a known name?\n\tif v, ok := severityByName(value); ok {\n\t\tthreshold = v\n\t} else {\n\t\tv, err := strconv.Atoi(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tthreshold = severity(v)\n\t}\n\tlogging.stderrThreshold.set(threshold)\n\treturn nil\n}\n\nfunc severityByName(s string) (severity, bool) {\n\ts = strings.ToUpper(s)\n\tfor i, name := range severityName {\n\t\tif name == s {\n\t\t\treturn severity(i), true\n\t\t}\n\t}\n\treturn 0, false\n}\n\n// OutputStats tracks the number of output lines and bytes written.\ntype OutputStats struct {\n\tlines int64\n\tbytes int64\n}\n\n// Lines returns the number of lines written.\nfunc (s *OutputStats) Lines() int64 {\n\treturn atomic.LoadInt64(&s.lines)\n}\n\n// Bytes returns the number of bytes written.\nfunc (s *OutputStats) Bytes() int64 
{\n\treturn atomic.LoadInt64(&s.bytes)\n}\n\n// Stats tracks the number of lines of output and number of bytes\n// per severity level. Values must be read with atomic.LoadInt64.\nvar Stats struct {\n\tInfo, Warning, Error OutputStats\n}\n\nvar severityStats = [numSeverity]*OutputStats{\n\tinfoLog:    &Stats.Info,\n\twarningLog: &Stats.Warning,\n\terrorLog:   &Stats.Error,\n}\n\n// Level is exported because it appears in the arguments to V and is\n// the type of the v flag, which can be set programmatically.\n// It's a distinct type because we want to discriminate it from logType.\n// Variables of type level are only changed under logging.mu.\n// The -v flag is read only with atomic ops, so the state of the logging\n// module is consistent.\n\n// Level is treated as a sync/atomic int32.\n\n// Level specifies a level of verbosity for V logs. *Level implements\n// flag.Value; the -v flag is of type Level and should be modified\n// only through the flag.Value interface.\ntype Level int32\n\n// get returns the value of the Level.\nfunc (l *Level) get() Level {\n\treturn Level(atomic.LoadInt32((*int32)(l)))\n}\n\n// set sets the value of the Level.\nfunc (l *Level) set(val Level) {\n\tatomic.StoreInt32((*int32)(l), int32(val))\n}\n\n// String is part of the flag.Value interface.\nfunc (l *Level) String() string {\n\treturn strconv.FormatInt(int64(*l), 10)\n}\n\n// Get is part of the flag.Value interface.\nfunc (l *Level) Get() interface{} {\n\treturn *l\n}\n\n// Set is part of the flag.Value interface.\nfunc (l *Level) Set(value string) error {\n\tv, err := strconv.Atoi(value)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogging.mu.Lock()\n\tdefer logging.mu.Unlock()\n\tlogging.setVState(Level(v), logging.vmodule.filter, false)\n\treturn nil\n}\n\n// moduleSpec represents the setting of the -vmodule flag.\ntype moduleSpec struct {\n\tfilter []modulePat\n}\n\n// modulePat contains a filter for the -vmodule flag.\n// It holds a verbosity level and a file pattern to 
match.\ntype modulePat struct {\n\tpattern string\n\tliteral bool // The pattern is a literal string\n\tlevel   Level\n}\n\n// match reports whether the file matches the pattern. It uses a string\n// comparison if the pattern contains no metacharacters.\nfunc (m *modulePat) match(file string) bool {\n\tif m.literal {\n\t\treturn file == m.pattern\n\t}\n\tmatch, _ := filepath.Match(m.pattern, file)\n\treturn match\n}\n\nfunc (m *moduleSpec) String() string {\n\t// Lock because the type is not atomic. TODO: clean this up.\n\tlogging.mu.Lock()\n\tdefer logging.mu.Unlock()\n\tvar b bytes.Buffer\n\tfor i, f := range m.filter {\n\t\tif i > 0 {\n\t\t\tb.WriteRune(',')\n\t\t}\n\t\tfmt.Fprintf(&b, \"%s=%d\", f.pattern, f.level)\n\t}\n\treturn b.String()\n}\n\n// Get is part of the (Go 1.2)  flag.Getter interface. It always returns nil for this flag type since the\n// struct is not exported.\nfunc (m *moduleSpec) Get() interface{} {\n\treturn nil\n}\n\nvar errVmoduleSyntax = errors.New(\"syntax error: expect comma-separated list of filename=N\")\n\n// Syntax: -vmodule=recordio=2,file=1,gfs*=3\nfunc (m *moduleSpec) Set(value string) error {\n\tvar filter []modulePat\n\tfor _, pat := range strings.Split(value, \",\") {\n\t\tif len(pat) == 0 {\n\t\t\t// Empty strings such as from a trailing comma can be ignored.\n\t\t\tcontinue\n\t\t}\n\t\tpatLev := strings.Split(pat, \"=\")\n\t\tif len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {\n\t\t\treturn errVmoduleSyntax\n\t\t}\n\t\tpattern := patLev[0]\n\t\tv, err := strconv.Atoi(patLev[1])\n\t\tif err != nil {\n\t\t\treturn errors.New(\"syntax error: expect comma-separated list of filename=N\")\n\t\t}\n\t\tif v < 0 {\n\t\t\treturn errors.New(\"negative value for vmodule level\")\n\t\t}\n\t\tif v == 0 {\n\t\t\tcontinue // Ignore. 
It's harmless but no point in paying the overhead.\n\t\t}\n\t\t// TODO: check syntax of filter?\n\t\tfilter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})\n\t}\n\tlogging.mu.Lock()\n\tdefer logging.mu.Unlock()\n\tlogging.setVState(logging.verbosity, filter, true)\n\treturn nil\n}\n\n// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters\n// that require filepath.Match to be called to match the pattern.\nfunc isLiteral(pattern string) bool {\n\treturn !strings.ContainsAny(pattern, `\\*?[]`)\n}\n\n// traceLocation represents the setting of the -log_backtrace_at flag.\ntype traceLocation struct {\n\tfile string\n\tline int\n}\n\n// isSet reports whether the trace location has been specified.\n// logging.mu is held.\nfunc (t *traceLocation) isSet() bool {\n\treturn t.line > 0\n}\n\n// match reports whether the specified file and line matches the trace location.\n// The argument file name is the full path, not the basename specified in the flag.\n// logging.mu is held.\nfunc (t *traceLocation) match(file string, line int) bool {\n\tif t.line != line {\n\t\treturn false\n\t}\n\tif i := strings.LastIndex(file, \"/\"); i >= 0 {\n\t\tfile = file[i+1:]\n\t}\n\treturn t.file == file\n}\n\nfunc (t *traceLocation) String() string {\n\t// Lock because the type is not atomic. TODO: clean this up.\n\tlogging.mu.Lock()\n\tdefer logging.mu.Unlock()\n\treturn fmt.Sprintf(\"%s:%d\", t.file, t.line)\n}\n\n// Get is part of the (Go 1.2) flag.Getter interface. 
It always returns nil for this flag type since the\n// struct is not exported\nfunc (t *traceLocation) Get() interface{} {\n\treturn nil\n}\n\nvar errTraceSyntax = errors.New(\"syntax error: expect file.go:234\")\n\n// Syntax: -log_backtrace_at=gopherflakes.go:234\n// Note that unlike vmodule the file extension is included here.\nfunc (t *traceLocation) Set(value string) error {\n\tif value == \"\" {\n\t\t// Unset.\n\t\tt.line = 0\n\t\tt.file = \"\"\n\t}\n\tfields := strings.Split(value, \":\")\n\tif len(fields) != 2 {\n\t\treturn errTraceSyntax\n\t}\n\tfile, line := fields[0], fields[1]\n\tif !strings.Contains(file, \".\") {\n\t\treturn errTraceSyntax\n\t}\n\tv, err := strconv.Atoi(line)\n\tif err != nil {\n\t\treturn errTraceSyntax\n\t}\n\tif v <= 0 {\n\t\treturn errors.New(\"negative or zero value for level\")\n\t}\n\tlogging.mu.Lock()\n\tdefer logging.mu.Unlock()\n\tt.line = v\n\tt.file = file\n\treturn nil\n}\n\n// flushSyncWriter is the interface satisfied by logging destinations.\ntype flushSyncWriter interface {\n\tFlush() error\n\tSync() error\n\tio.Writer\n}\n\nfunc init() {\n\tflag.BoolVar(&logging.toStderr, \"logtostderr\", false, \"log to standard error instead of files\")\n\tflag.BoolVar(&logging.alsoToStderr, \"alsologtostderr\", false, \"log to standard error as well as files\")\n\tflag.Var(&logging.verbosity, \"v\", \"log level for V logs\")\n\tflag.Var(&logging.stderrThreshold, \"stderrthreshold\", \"logs at or above this threshold go to stderr\")\n\tflag.Var(&logging.vmodule, \"vmodule\", \"comma-separated list of pattern=N settings for file-filtered logging\")\n\tflag.Var(&logging.traceLocation, \"log_backtrace_at\", \"when logging hits line file:N, emit a stack trace\")\n\n\t// Default stderrThreshold is ERROR.\n\tlogging.stderrThreshold = errorLog\n\n\tlogging.setVState(0, nil, false)\n\tgo logging.flushDaemon()\n}\n\n// Flush flushes all pending log I/O.\nfunc Flush() {\n\tlogging.lockAndFlushAll()\n}\n\n// loggingT collects all the global 
state of the logging setup.\ntype loggingT struct {\n\t// Boolean flags. Not handled atomically because the flag.Value interface\n\t// does not let us avoid the =true, and that shorthand is necessary for\n\t// compatibility. TODO: does this matter enough to fix? Seems unlikely.\n\ttoStderr     bool // The -logtostderr flag.\n\talsoToStderr bool // The -alsologtostderr flag.\n\n\t// Level flag. Handled atomically.\n\tstderrThreshold severity // The -stderrthreshold flag.\n\n\t// freeList is a list of byte buffers, maintained under freeListMu.\n\tfreeList *buffer\n\t// freeListMu maintains the free list. It is separate from the main mutex\n\t// so buffers can be grabbed and printed to without holding the main lock,\n\t// for better parallelization.\n\tfreeListMu sync.Mutex\n\n\t// mu protects the remaining elements of this structure and is\n\t// used to synchronize logging.\n\tmu sync.Mutex\n\t// file holds writer for each of the log types.\n\tfile [numSeverity]flushSyncWriter\n\t// pcs is used in V to avoid an allocation when computing the caller's PC.\n\tpcs [1]uintptr\n\t// vmap is a cache of the V Level for each V() call site, identified by PC.\n\t// It is wiped whenever the vmodule flag changes state.\n\tvmap map[uintptr]Level\n\t// filterLength stores the length of the vmodule filter chain. If greater\n\t// than zero, it means vmodule is enabled. It may be read safely\n\t// using sync.LoadInt32, but is only modified under mu.\n\tfilterLength int32\n\t// traceLocation is the state of the -log_backtrace_at flag.\n\ttraceLocation traceLocation\n\t// These flags are modified only under lock, although verbosity may be fetched\n\t// safely using atomic.LoadInt32.\n\tvmodule   moduleSpec // The state of the -vmodule flag.\n\tverbosity Level      // V logging level, the value of the -v flag/\n}\n\n// buffer holds a byte Buffer for reuse. 
The zero value is ready for use.\ntype buffer struct {\n\tbytes.Buffer\n\ttmp  [64]byte // temporary byte array for creating headers.\n\tnext *buffer\n}\n\nvar logging loggingT\n\n// setVState sets a consistent state for V logging.\n// l.mu is held.\nfunc (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {\n\t// Turn verbosity off so V will not fire while we are in transition.\n\tlogging.verbosity.set(0)\n\t// Ditto for filter length.\n\tatomic.StoreInt32(&logging.filterLength, 0)\n\n\t// Set the new filters and wipe the pc->Level map if the filter has changed.\n\tif setFilter {\n\t\tlogging.vmodule.filter = filter\n\t\tlogging.vmap = make(map[uintptr]Level)\n\t}\n\n\t// Things are consistent now, so enable filtering and verbosity.\n\t// They are enabled in order opposite to that in V.\n\tatomic.StoreInt32(&logging.filterLength, int32(len(filter)))\n\tlogging.verbosity.set(verbosity)\n}\n\n// getBuffer returns a new, ready-to-use buffer.\nfunc (l *loggingT) getBuffer() *buffer {\n\tl.freeListMu.Lock()\n\tb := l.freeList\n\tif b != nil {\n\t\tl.freeList = b.next\n\t}\n\tl.freeListMu.Unlock()\n\tif b == nil {\n\t\tb = new(buffer)\n\t} else {\n\t\tb.next = nil\n\t\tb.Reset()\n\t}\n\treturn b\n}\n\n// putBuffer returns a buffer to the free list.\nfunc (l *loggingT) putBuffer(b *buffer) {\n\tif b.Len() >= 256 {\n\t\t// Let big buffers die a natural death.\n\t\treturn\n\t}\n\tl.freeListMu.Lock()\n\tb.next = l.freeList\n\tl.freeList = b\n\tl.freeListMu.Unlock()\n}\n\nvar timeNow = time.Now // Stubbed out for testing.\n\n/*\nheader formats a log header as defined by the C++ implementation.\nIt returns a buffer containing the formatted header and the user's file and line number.\nThe depth specifies how many stack frames above lives the source line to be identified in the log message.\n\nLog lines have this form:\n\tLmmdd hh:mm:ss.uuuuuu threadid file:line] msg...\nwhere the fields are defined as follows:\n\tL                A single character, 
representing the log level (eg 'I' for INFO)\n\tmm               The month (zero padded; ie May is '05')\n\tdd               The day (zero padded)\n\thh:mm:ss.uuuuuu  Time in hours, minutes and fractional seconds\n\tthreadid         The space-padded thread ID as returned by GetTID()\n\tfile             The file name\n\tline             The line number\n\tmsg              The user-supplied message\n*/\nfunc (l *loggingT) header(s severity, depth int) (*buffer, string, int) {\n\t_, file, line, ok := runtime.Caller(3 + depth)\n\tif !ok {\n\t\tfile = \"???\"\n\t\tline = 1\n\t} else {\n\t\tslash := strings.LastIndex(file, \"/\")\n\t\tif slash >= 0 {\n\t\t\tfile = file[slash+1:]\n\t\t}\n\t}\n\treturn l.formatHeader(s, file, line), file, line\n}\n\n// formatHeader formats a log header using the provided file name and line number.\nfunc (l *loggingT) formatHeader(s severity, file string, line int) *buffer {\n\tnow := timeNow()\n\tif line < 0 {\n\t\tline = 0 // not a real line number, but acceptable to someDigits\n\t}\n\tif s > fatalLog {\n\t\ts = infoLog // for safety.\n\t}\n\tbuf := l.getBuffer()\n\n\t// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.\n\t// It's worth about 3X. 
Fprintf is hard.\n\t_, month, day := now.Date()\n\thour, minute, second := now.Clock()\n\t// Lmmdd hh:mm:ss.uuuuuu threadid file:line]\n\tbuf.tmp[0] = severityChar[s]\n\tbuf.twoDigits(1, int(month))\n\tbuf.twoDigits(3, day)\n\tbuf.tmp[5] = ' '\n\tbuf.twoDigits(6, hour)\n\tbuf.tmp[8] = ':'\n\tbuf.twoDigits(9, minute)\n\tbuf.tmp[11] = ':'\n\tbuf.twoDigits(12, second)\n\tbuf.tmp[14] = '.'\n\tbuf.nDigits(6, 15, now.Nanosecond()/1000, '0')\n\tbuf.tmp[21] = ' '\n\tbuf.nDigits(7, 22, pid, ' ') // TODO: should be TID\n\tbuf.tmp[29] = ' '\n\tbuf.Write(buf.tmp[:30])\n\tbuf.WriteString(file)\n\tbuf.tmp[0] = ':'\n\tn := buf.someDigits(1, line)\n\tbuf.tmp[n+1] = ']'\n\tbuf.tmp[n+2] = ' '\n\tbuf.Write(buf.tmp[:n+3])\n\treturn buf\n}\n\n// Some custom tiny helper functions to print the log header efficiently.\n\nconst digits = \"0123456789\"\n\n// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].\nfunc (buf *buffer) twoDigits(i, d int) {\n\tbuf.tmp[i+1] = digits[d%10]\n\td /= 10\n\tbuf.tmp[i] = digits[d%10]\n}\n\n// nDigits formats an n-digit integer at buf.tmp[i],\n// padding with pad on the left.\n// It assumes d >= 0.\nfunc (buf *buffer) nDigits(n, i, d int, pad byte) {\n\tj := n - 1\n\tfor ; j >= 0 && d > 0; j-- {\n\t\tbuf.tmp[i+j] = digits[d%10]\n\t\td /= 10\n\t}\n\tfor ; j >= 0; j-- {\n\t\tbuf.tmp[i+j] = pad\n\t}\n}\n\n// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].\nfunc (buf *buffer) someDigits(i, d int) int {\n\t// Print into the top, then copy down. 
We know there's space for at least\n\t// a 10-digit number.\n\tj := len(buf.tmp)\n\tfor {\n\t\tj--\n\t\tbuf.tmp[j] = digits[d%10]\n\t\td /= 10\n\t\tif d == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn copy(buf.tmp[i:], buf.tmp[j:])\n}\n\nfunc (l *loggingT) println(s severity, args ...interface{}) {\n\tbuf, file, line := l.header(s, 0)\n\tfmt.Fprintln(buf, args...)\n\tl.output(s, buf, file, line, false)\n}\n\nfunc (l *loggingT) print(s severity, args ...interface{}) {\n\tl.printDepth(s, 1, args...)\n}\n\nfunc (l *loggingT) printDepth(s severity, depth int, args ...interface{}) {\n\tbuf, file, line := l.header(s, depth)\n\tfmt.Fprint(buf, args...)\n\tif buf.Bytes()[buf.Len()-1] != '\\n' {\n\t\tbuf.WriteByte('\\n')\n\t}\n\tl.output(s, buf, file, line, false)\n}\n\nfunc (l *loggingT) printf(s severity, format string, args ...interface{}) {\n\tbuf, file, line := l.header(s, 0)\n\tfmt.Fprintf(buf, format, args...)\n\tif buf.Bytes()[buf.Len()-1] != '\\n' {\n\t\tbuf.WriteByte('\\n')\n\t}\n\tl.output(s, buf, file, line, false)\n}\n\n// printWithFileLine behaves like print but uses the provided file and line number.  
If\n// alsoLogToStderr is true, the log message always appears on standard error; it\n// will also appear in the log file unless --logtostderr is set.\nfunc (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) {\n\tbuf := l.formatHeader(s, file, line)\n\tfmt.Fprint(buf, args...)\n\tif buf.Bytes()[buf.Len()-1] != '\\n' {\n\t\tbuf.WriteByte('\\n')\n\t}\n\tl.output(s, buf, file, line, alsoToStderr)\n}\n\n// output writes the data to the log files and releases the buffer.\nfunc (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {\n\tl.mu.Lock()\n\tif l.traceLocation.isSet() {\n\t\tif l.traceLocation.match(file, line) {\n\t\t\tbuf.Write(stacks(false))\n\t\t}\n\t}\n\tdata := buf.Bytes()\n\tif l.toStderr {\n\t\tos.Stderr.Write(data)\n\t} else {\n\t\tif alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {\n\t\t\tos.Stderr.Write(data)\n\t\t}\n\t\tif l.file[s] == nil {\n\t\t\tif err := l.createFiles(s); err != nil {\n\t\t\t\tos.Stderr.Write(data) // Make sure the message appears somewhere.\n\t\t\t\tl.exit(err)\n\t\t\t}\n\t\t}\n\t\tswitch s {\n\t\tcase fatalLog:\n\t\t\tl.file[fatalLog].Write(data)\n\t\t\tfallthrough\n\t\tcase errorLog:\n\t\t\tl.file[errorLog].Write(data)\n\t\t\tfallthrough\n\t\tcase warningLog:\n\t\t\tl.file[warningLog].Write(data)\n\t\t\tfallthrough\n\t\tcase infoLog:\n\t\t\tl.file[infoLog].Write(data)\n\t\t}\n\t}\n\tif s == fatalLog {\n\t\t// If we got here via Exit rather than Fatal, print no stacks.\n\t\tif atomic.LoadUint32(&fatalNoStacks) > 0 {\n\t\t\tl.mu.Unlock()\n\t\t\ttimeoutFlush(10 * time.Second)\n\t\t\tos.Exit(1)\n\t\t}\n\t\t// Dump all goroutine stacks before exiting.\n\t\t// First, make sure we see the trace for the current goroutine on standard error.\n\t\t// If -logtostderr has been specified, the loop below will do that anyway\n\t\t// as the first stack in the full dump.\n\t\tif !l.toStderr 
{\n\t\t\tos.Stderr.Write(stacks(false))\n\t\t}\n\t\t// Write the stack trace for all goroutines to the files.\n\t\ttrace := stacks(true)\n\t\tlogExitFunc = func(error) {} // If we get a write error, we'll still exit below.\n\t\tfor log := fatalLog; log >= infoLog; log-- {\n\t\t\tif f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.\n\t\t\t\tf.Write(trace)\n\t\t\t}\n\t\t}\n\t\tl.mu.Unlock()\n\t\ttimeoutFlush(10 * time.Second)\n\t\tos.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.\n\t}\n\tl.putBuffer(buf)\n\tl.mu.Unlock()\n\tif stats := severityStats[s]; stats != nil {\n\t\tatomic.AddInt64(&stats.lines, 1)\n\t\tatomic.AddInt64(&stats.bytes, int64(len(data)))\n\t}\n}\n\n// timeoutFlush calls Flush and returns when it completes or after timeout\n// elapses, whichever happens first.  This is needed because the hooks invoked\n// by Flush may deadlock when glog.Fatal is called from a hook that holds\n// a lock.\nfunc timeoutFlush(timeout time.Duration) {\n\tdone := make(chan bool, 1)\n\tgo func() {\n\t\tFlush() // calls logging.lockAndFlushAll()\n\t\tdone <- true\n\t}()\n\tselect {\n\tcase <-done:\n\tcase <-time.After(timeout):\n\t\tfmt.Fprintln(os.Stderr, \"glog: Flush took longer than\", timeout)\n\t}\n}\n\n// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.\nfunc stacks(all bool) []byte {\n\t// We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.\n\tn := 10000\n\tif all {\n\t\tn = 100000\n\t}\n\tvar trace []byte\n\tfor i := 0; i < 5; i++ {\n\t\ttrace = make([]byte, n)\n\t\tnbytes := runtime.Stack(trace, all)\n\t\tif nbytes < len(trace) {\n\t\t\treturn trace[:nbytes]\n\t\t}\n\t\tn *= 2\n\t}\n\treturn trace\n}\n\n// logExitFunc provides a simple mechanism to override the default behavior\n// of exiting on error. Used in testing and to guarantee we reach a required exit\n// for fatal logs. 
Instead, exit could be a function rather than a method but that\n// would make its use clumsier.\nvar logExitFunc func(error)\n\n// exit is called if there is trouble creating or writing log files.\n// It flushes the logs and exits the program; there's no point in hanging around.\n// l.mu is held.\nfunc (l *loggingT) exit(err error) {\n\tfmt.Fprintf(os.Stderr, \"log: exiting because of error: %s\\n\", err)\n\t// If logExitFunc is set, we do that instead of exiting.\n\tif logExitFunc != nil {\n\t\tlogExitFunc(err)\n\t\treturn\n\t}\n\tl.flushAll()\n\tos.Exit(2)\n}\n\n// syncBuffer joins a bufio.Writer to its underlying file, providing access to the\n// file's Sync method and providing a wrapper for the Write method that provides log\n// file rotation. There are conflicting methods, so the file cannot be embedded.\n// l.mu is held for all its methods.\ntype syncBuffer struct {\n\tlogger *loggingT\n\t*bufio.Writer\n\tfile   *os.File\n\tsev    severity\n\tnbytes uint64 // The number of bytes written to this file\n}\n\nfunc (sb *syncBuffer) Sync() error {\n\treturn sb.file.Sync()\n}\n\nfunc (sb *syncBuffer) Write(p []byte) (n int, err error) {\n\tif sb.nbytes+uint64(len(p)) >= MaxSize {\n\t\tif err := sb.rotateFile(time.Now()); err != nil {\n\t\t\tsb.logger.exit(err)\n\t\t}\n\t}\n\tn, err = sb.Writer.Write(p)\n\tsb.nbytes += uint64(n)\n\tif err != nil {\n\t\tsb.logger.exit(err)\n\t}\n\treturn\n}\n\n// rotateFile closes the syncBuffer's file and starts a new one.\nfunc (sb *syncBuffer) rotateFile(now time.Time) error {\n\tif sb.file != nil {\n\t\tsb.Flush()\n\t\tsb.file.Close()\n\t}\n\tvar err error\n\tsb.file, _, err = create(severityName[sb.sev], now)\n\tsb.nbytes = 0\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsb.Writer = bufio.NewWriterSize(sb.file, bufferSize)\n\n\t// Write header.\n\tvar buf bytes.Buffer\n\tfmt.Fprintf(&buf, \"Log file created at: %s\\n\", now.Format(\"2006/01/02 15:04:05\"))\n\tfmt.Fprintf(&buf, \"Running on machine: %s\\n\", 
host)\n\tfmt.Fprintf(&buf, \"Binary: Built with %s %s for %s/%s\\n\", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)\n\tfmt.Fprintf(&buf, \"Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\\n\")\n\tn, err := sb.file.Write(buf.Bytes())\n\tsb.nbytes += uint64(n)\n\treturn err\n}\n\n// bufferSize sizes the buffer associated with each log file. It's large\n// so that log records can accumulate without the logging thread blocking\n// on disk I/O. The flushDaemon will block instead.\nconst bufferSize = 256 * 1024\n\n// createFiles creates all the log files for severity from sev down to infoLog.\n// l.mu is held.\nfunc (l *loggingT) createFiles(sev severity) error {\n\tnow := time.Now()\n\t// Files are created in decreasing severity order, so as soon as we find one\n\t// has already been created, we can stop.\n\tfor s := sev; s >= infoLog && l.file[s] == nil; s-- {\n\t\tsb := &syncBuffer{\n\t\t\tlogger: l,\n\t\t\tsev:    s,\n\t\t}\n\t\tif err := sb.rotateFile(now); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tl.file[s] = sb\n\t}\n\treturn nil\n}\n\nconst flushInterval = 30 * time.Second\n\n// flushDaemon periodically flushes the log file buffers.\nfunc (l *loggingT) flushDaemon() {\n\tfor _ = range time.NewTicker(flushInterval).C {\n\t\tl.lockAndFlushAll()\n\t}\n}\n\n// lockAndFlushAll is like flushAll but locks l.mu first.\nfunc (l *loggingT) lockAndFlushAll() {\n\tl.mu.Lock()\n\tl.flushAll()\n\tl.mu.Unlock()\n}\n\n// flushAll flushes all the logs and attempts to \"sync\" their data to disk.\n// l.mu is held.\nfunc (l *loggingT) flushAll() {\n\t// Flush from fatal down, in case there's trouble flushing.\n\tfor s := fatalLog; s >= infoLog; s-- {\n\t\tfile := l.file[s]\n\t\tif file != nil {\n\t\t\tfile.Flush() // ignore error\n\t\t\tfile.Sync()  // ignore error\n\t\t}\n\t}\n}\n\n// CopyStandardLogTo arranges for messages written to the Go \"log\" package's\n// default logs to also appear in the Google logs for the named and lower\n// 
severities.  Subsequent changes to the standard log's default output location\n// or format may break this behavior.\n//\n// Valid names are \"INFO\", \"WARNING\", \"ERROR\", and \"FATAL\".  If the name is not\n// recognized, CopyStandardLogTo panics.\nfunc CopyStandardLogTo(name string) {\n\tsev, ok := severityByName(name)\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"log.CopyStandardLogTo(%q): unrecognized severity name\", name))\n\t}\n\t// Set a log format that captures the user's file and line:\n\t//   d.go:23: message\n\tstdLog.SetFlags(stdLog.Lshortfile)\n\tstdLog.SetOutput(logBridge(sev))\n}\n\n// logBridge provides the Write method that enables CopyStandardLogTo to connect\n// Go's standard logs to the logs provided by this package.\ntype logBridge severity\n\n// Write parses the standard logging line and passes its components to the\n// logger for severity(lb).\nfunc (lb logBridge) Write(b []byte) (n int, err error) {\n\tvar (\n\t\tfile = \"???\"\n\t\tline = 1\n\t\ttext string\n\t)\n\t// Split \"d.go:23: message\" into \"d.go\", \"23\", and \"message\".\n\tif parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {\n\t\ttext = fmt.Sprintf(\"bad log format: %s\", b)\n\t} else {\n\t\tfile = string(parts[0])\n\t\ttext = string(parts[2][1:]) // skip leading space\n\t\tline, err = strconv.Atoi(string(parts[1]))\n\t\tif err != nil {\n\t\t\ttext = fmt.Sprintf(\"bad line number: %s\", b)\n\t\t\tline = 1\n\t\t}\n\t}\n\t// printWithFileLine with alsoToStderr=true, so standard log messages\n\t// always appear on standard error.\n\tlogging.printWithFileLine(severity(lb), file, line, true, text)\n\treturn len(b), nil\n}\n\n// setV computes and remembers the V level for a given PC\n// when vmodule is enabled.\n// File pattern matching takes the basename of the file, stripped\n// of its .go suffix, and uses filepath.Match, which is a little more\n// general than the *? 
matching used in C++.\n// l.mu is held.\nfunc (l *loggingT) setV(pc uintptr) Level {\n\tfn := runtime.FuncForPC(pc)\n\tfile, _ := fn.FileLine(pc)\n\t// The file is something like /a/b/c/d.go. We want just the d.\n\tif strings.HasSuffix(file, \".go\") {\n\t\tfile = file[:len(file)-3]\n\t}\n\tif slash := strings.LastIndex(file, \"/\"); slash >= 0 {\n\t\tfile = file[slash+1:]\n\t}\n\tfor _, filter := range l.vmodule.filter {\n\t\tif filter.match(file) {\n\t\t\tl.vmap[pc] = filter.level\n\t\t\treturn filter.level\n\t\t}\n\t}\n\tl.vmap[pc] = 0\n\treturn 0\n}\n\n// Verbose is a boolean type that implements Infof (like Printf) etc.\n// See the documentation of V for more information.\ntype Verbose bool\n\n// V reports whether verbosity at the call site is at least the requested level.\n// The returned value is a boolean of type Verbose, which implements Info, Infoln\n// and Infof. These methods will write to the Info log if called.\n// Thus, one may write either\n//\tif glog.V(2) { glog.Info(\"log this\") }\n// or\n//\tglog.V(2).Info(\"log this\")\n// The second form is shorter but the first is cheaper if logging is off because it does\n// not evaluate its arguments.\n//\n// Whether an individual call to V generates a log record depends on the setting of\n// the -v and --vmodule flags; both are off by default. 
If the level in the call to\n// V is at least the value of -v, or of -vmodule for the source file containing the\n// call, the V call will log.\nfunc V(level Level) Verbose {\n\t// This function tries hard to be cheap unless there's work to do.\n\t// The fast path is two atomic loads and compares.\n\n\t// Here is a cheap but safe test to see if V logging is enabled globally.\n\tif logging.verbosity.get() >= level {\n\t\treturn Verbose(true)\n\t}\n\n\t// It's off globally but it vmodule may still be set.\n\t// Here is another cheap but safe test to see if vmodule is enabled.\n\tif atomic.LoadInt32(&logging.filterLength) > 0 {\n\t\t// Now we need a proper lock to use the logging structure. The pcs field\n\t\t// is shared so we must lock before accessing it. This is fairly expensive,\n\t\t// but if V logging is enabled we're slow anyway.\n\t\tlogging.mu.Lock()\n\t\tdefer logging.mu.Unlock()\n\t\tif runtime.Callers(2, logging.pcs[:]) == 0 {\n\t\t\treturn Verbose(false)\n\t\t}\n\t\tv, ok := logging.vmap[logging.pcs[0]]\n\t\tif !ok {\n\t\t\tv = logging.setV(logging.pcs[0])\n\t\t}\n\t\treturn Verbose(v >= level)\n\t}\n\treturn Verbose(false)\n}\n\n// Info is equivalent to the global Info function, guarded by the value of v.\n// See the documentation of V for usage.\nfunc (v Verbose) Info(args ...interface{}) {\n\tif v {\n\t\tlogging.print(infoLog, args...)\n\t}\n}\n\n// Infoln is equivalent to the global Infoln function, guarded by the value of v.\n// See the documentation of V for usage.\nfunc (v Verbose) Infoln(args ...interface{}) {\n\tif v {\n\t\tlogging.println(infoLog, args...)\n\t}\n}\n\n// Infof is equivalent to the global Infof function, guarded by the value of v.\n// See the documentation of V for usage.\nfunc (v Verbose) Infof(format string, args ...interface{}) {\n\tif v {\n\t\tlogging.printf(infoLog, format, args...)\n\t}\n}\n\n// Info logs to the INFO log.\n// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.\nfunc Info(args 
...interface{}) {\n\tlogging.print(infoLog, args...)\n}\n\n// InfoDepth acts as Info but uses depth to determine which call frame to log.\n// InfoDepth(0, \"msg\") is the same as Info(\"msg\").\nfunc InfoDepth(depth int, args ...interface{}) {\n\tlogging.printDepth(infoLog, depth, args...)\n}\n\n// Infoln logs to the INFO log.\n// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.\nfunc Infoln(args ...interface{}) {\n\tlogging.println(infoLog, args...)\n}\n\n// Infof logs to the INFO log.\n// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.\nfunc Infof(format string, args ...interface{}) {\n\tlogging.printf(infoLog, format, args...)\n}\n\n// Warning logs to the WARNING and INFO logs.\n// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.\nfunc Warning(args ...interface{}) {\n\tlogging.print(warningLog, args...)\n}\n\n// WarningDepth acts as Warning but uses depth to determine which call frame to log.\n// WarningDepth(0, \"msg\") is the same as Warning(\"msg\").\nfunc WarningDepth(depth int, args ...interface{}) {\n\tlogging.printDepth(warningLog, depth, args...)\n}\n\n// Warningln logs to the WARNING and INFO logs.\n// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.\nfunc Warningln(args ...interface{}) {\n\tlogging.println(warningLog, args...)\n}\n\n// Warningf logs to the WARNING and INFO logs.\n// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.\nfunc Warningf(format string, args ...interface{}) {\n\tlogging.printf(warningLog, format, args...)\n}\n\n// Error logs to the ERROR, WARNING, and INFO logs.\n// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.\nfunc Error(args ...interface{}) {\n\tlogging.print(errorLog, args...)\n}\n\n// ErrorDepth acts as Error but uses depth to determine which call frame to log.\n// ErrorDepth(0, \"msg\") is the same as 
Error(\"msg\").\nfunc ErrorDepth(depth int, args ...interface{}) {\n\tlogging.printDepth(errorLog, depth, args...)\n}\n\n// Errorln logs to the ERROR, WARNING, and INFO logs.\n// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.\nfunc Errorln(args ...interface{}) {\n\tlogging.println(errorLog, args...)\n}\n\n// Errorf logs to the ERROR, WARNING, and INFO logs.\n// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.\nfunc Errorf(format string, args ...interface{}) {\n\tlogging.printf(errorLog, format, args...)\n}\n\n// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,\n// including a stack trace of all running goroutines, then calls os.Exit(255).\n// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.\nfunc Fatal(args ...interface{}) {\n\tlogging.print(fatalLog, args...)\n}\n\n// FatalDepth acts as Fatal but uses depth to determine which call frame to log.\n// FatalDepth(0, \"msg\") is the same as Fatal(\"msg\").\nfunc FatalDepth(depth int, args ...interface{}) {\n\tlogging.printDepth(fatalLog, depth, args...)\n}\n\n// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,\n// including a stack trace of all running goroutines, then calls os.Exit(255).\n// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.\nfunc Fatalln(args ...interface{}) {\n\tlogging.println(fatalLog, args...)\n}\n\n// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,\n// including a stack trace of all running goroutines, then calls os.Exit(255).\n// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.\nfunc Fatalf(format string, args ...interface{}) {\n\tlogging.printf(fatalLog, format, args...)\n}\n\n// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.\n// It allows Exit and relatives to use the Fatal logs.\nvar fatalNoStacks uint32\n\n// Exit logs to the FATAL, ERROR, WARNING, and INFO 
logs, then calls os.Exit(1).\n// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.\nfunc Exit(args ...interface{}) {\n\tatomic.StoreUint32(&fatalNoStacks, 1)\n\tlogging.print(fatalLog, args...)\n}\n\n// ExitDepth acts as Exit but uses depth to determine which call frame to log.\n// ExitDepth(0, \"msg\") is the same as Exit(\"msg\").\nfunc ExitDepth(depth int, args ...interface{}) {\n\tatomic.StoreUint32(&fatalNoStacks, 1)\n\tlogging.printDepth(fatalLog, depth, args...)\n}\n\n// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).\nfunc Exitln(args ...interface{}) {\n\tatomic.StoreUint32(&fatalNoStacks, 1)\n\tlogging.println(fatalLog, args...)\n}\n\n// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).\n// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.\nfunc Exitf(format string, args ...interface{}) {\n\tatomic.StoreUint32(&fatalNoStacks, 1)\n\tlogging.printf(fatalLog, format, args...)\n}\n"
  },
  {
    "path": "vendor/github.com/golang/glog/glog_file.go",
    "content": "// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/\n//\n// Copyright 2013 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// File I/O for logs.\n\npackage glog\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/user\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n// MaxSize is the maximum size of a log file in bytes.\nvar MaxSize uint64 = 1024 * 1024 * 1800\n\n// logDirs lists the candidate directories for new log files.\nvar logDirs []string\n\n// If non-empty, overrides the choice of directory in which to write logs.\n// See createLogDirs for the full list of possible destinations.\nvar logDir = flag.String(\"log_dir\", \"\", \"If non-empty, write log files in this directory\")\n\nfunc createLogDirs() {\n\tif *logDir != \"\" {\n\t\tlogDirs = append(logDirs, *logDir)\n\t}\n\tlogDirs = append(logDirs, os.TempDir())\n}\n\nvar (\n\tpid      = os.Getpid()\n\tprogram  = filepath.Base(os.Args[0])\n\thost     = \"unknownhost\"\n\tuserName = \"unknownuser\"\n)\n\nfunc init() {\n\th, err := os.Hostname()\n\tif err == nil {\n\t\thost = shortHostname(h)\n\t}\n\n\tcurrent, err := user.Current()\n\tif err == nil {\n\t\tuserName = current.Username\n\t}\n\n\t// Sanitize userName since it may contain filepath separators on Windows.\n\tuserName = strings.Replace(userName, `\\`, \"_\", -1)\n}\n\n// shortHostname returns its argument, truncating at 
the first period.\n// For instance, given \"www.google.com\" it returns \"www\".\nfunc shortHostname(hostname string) string {\n\tif i := strings.Index(hostname, \".\"); i >= 0 {\n\t\treturn hostname[:i]\n\t}\n\treturn hostname\n}\n\n// logName returns a new log file name containing tag, with start time t, and\n// the name for the symlink for tag.\nfunc logName(tag string, t time.Time) (name, link string) {\n\tname = fmt.Sprintf(\"%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d\",\n\t\tprogram,\n\t\thost,\n\t\tuserName,\n\t\ttag,\n\t\tt.Year(),\n\t\tt.Month(),\n\t\tt.Day(),\n\t\tt.Hour(),\n\t\tt.Minute(),\n\t\tt.Second(),\n\t\tpid)\n\treturn name, program + \".\" + tag\n}\n\nvar onceLogDirs sync.Once\n\n// create creates a new log file and returns the file and its filename, which\n// contains tag (\"INFO\", \"FATAL\", etc.) and t.  If the file is created\n// successfully, create also attempts to update the symlink for that tag, ignoring\n// errors.\nfunc create(tag string, t time.Time) (f *os.File, filename string, err error) {\n\tonceLogDirs.Do(createLogDirs)\n\tif len(logDirs) == 0 {\n\t\treturn nil, \"\", errors.New(\"log: no log dirs\")\n\t}\n\tname, link := logName(tag, t)\n\tvar lastErr error\n\tfor _, dir := range logDirs {\n\t\tfname := filepath.Join(dir, name)\n\t\tf, err := os.Create(fname)\n\t\tif err == nil {\n\t\t\tsymlink := filepath.Join(dir, link)\n\t\t\tos.Remove(symlink)        // ignore err\n\t\t\tos.Symlink(name, symlink) // ignore err\n\t\t\treturn f, fname, nil\n\t\t}\n\t\tlastErr = err\n\t}\n\treturn nil, \"\", fmt.Errorf(\"log: cannot create log: %v\", lastErr)\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/LICENSE",
    "content": "Go support for Protocol Buffers - Google's data interchange format\n\nCopyright 2010 The Go Authors.  All rights reserved.\nhttps://github.com/golang/protobuf\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n    * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n    * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/Makefile",
    "content": "# Go support for Protocol Buffers - Google's data interchange format\n#\n# Copyright 2010 The Go Authors.  All rights reserved.\n# https://github.com/golang/protobuf\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n#     * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#     * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n#     * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\ninstall:\n\tgo install\n\ntest: install generate-test-pbs\n\tgo test\n\n\ngenerate-test-pbs:\n\tmake install\n\tmake -C testdata\n\tprotoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto\n\tmake\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/clone.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2011 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Protocol buffer deep copy and merge.\n// TODO: MessageSet and RawMessage.\n\npackage proto\n\nimport (\n\t\"log\"\n\t\"reflect\"\n\t\"strings\"\n)\n\n// Clone returns a deep copy of a protocol buffer.\nfunc Clone(pb Message) Message {\n\tin := reflect.ValueOf(pb)\n\tif in.IsNil() {\n\t\treturn pb\n\t}\n\n\tout := reflect.New(in.Type().Elem())\n\t// out is empty so a merge is a deep copy.\n\tmergeStruct(out.Elem(), in.Elem())\n\treturn out.Interface().(Message)\n}\n\n// Merge merges src into dst.\n// Required and optional fields that are set in src will be set to that value in dst.\n// Elements of repeated fields will be appended.\n// Merge panics if src and dst are not the same type, or if dst is nil.\nfunc Merge(dst, src Message) {\n\tin := reflect.ValueOf(src)\n\tout := reflect.ValueOf(dst)\n\tif out.IsNil() {\n\t\tpanic(\"proto: nil destination\")\n\t}\n\tif in.Type() != out.Type() {\n\t\t// Explicit test prior to mergeStruct so that mistyped nils will fail\n\t\tpanic(\"proto: type mismatch\")\n\t}\n\tif in.IsNil() {\n\t\t// Merging nil into non-nil is a quiet no-op\n\t\treturn\n\t}\n\tmergeStruct(out.Elem(), in.Elem())\n}\n\nfunc mergeStruct(out, in reflect.Value) {\n\tfor i := 0; i < in.NumField(); i++ {\n\t\tf := in.Type().Field(i)\n\t\tif strings.HasPrefix(f.Name, \"XXX_\") {\n\t\t\tcontinue\n\t\t}\n\t\tmergeAny(out.Field(i), in.Field(i))\n\t}\n\n\tif emIn, ok := in.Addr().Interface().(extendableProto); ok {\n\t\temOut 
:= out.Addr().Interface().(extendableProto)\n\t\tmergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())\n\t}\n\n\tuf := in.FieldByName(\"XXX_unrecognized\")\n\tif !uf.IsValid() {\n\t\treturn\n\t}\n\tuin := uf.Bytes()\n\tif len(uin) > 0 {\n\t\tout.FieldByName(\"XXX_unrecognized\").SetBytes(append([]byte(nil), uin...))\n\t}\n}\n\nfunc mergeAny(out, in reflect.Value) {\n\tif in.Type() == protoMessageType {\n\t\tif !in.IsNil() {\n\t\t\tif out.IsNil() {\n\t\t\t\tout.Set(reflect.ValueOf(Clone(in.Interface().(Message))))\n\t\t\t} else {\n\t\t\t\tMerge(out.Interface().(Message), in.Interface().(Message))\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tswitch in.Kind() {\n\tcase reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,\n\t\treflect.String, reflect.Uint32, reflect.Uint64:\n\t\tout.Set(in)\n\tcase reflect.Map:\n\t\tif in.Len() == 0 {\n\t\t\treturn\n\t\t}\n\t\tif out.IsNil() {\n\t\t\tout.Set(reflect.MakeMap(in.Type()))\n\t\t}\n\t\t// For maps with value types of *T or []byte we need to deep copy each value.\n\t\telemKind := in.Type().Elem().Kind()\n\t\tfor _, key := range in.MapKeys() {\n\t\t\tvar val reflect.Value\n\t\t\tswitch elemKind {\n\t\t\tcase reflect.Ptr:\n\t\t\t\tval = reflect.New(in.Type().Elem().Elem())\n\t\t\t\tmergeAny(val, in.MapIndex(key))\n\t\t\tcase reflect.Slice:\n\t\t\t\tval = in.MapIndex(key)\n\t\t\t\tval = reflect.ValueOf(append([]byte{}, val.Bytes()...))\n\t\t\tdefault:\n\t\t\t\tval = in.MapIndex(key)\n\t\t\t}\n\t\t\tout.SetMapIndex(key, val)\n\t\t}\n\tcase reflect.Ptr:\n\t\tif in.IsNil() {\n\t\t\treturn\n\t\t}\n\t\tif out.IsNil() {\n\t\t\tout.Set(reflect.New(in.Elem().Type()))\n\t\t}\n\t\tmergeAny(out.Elem(), in.Elem())\n\tcase reflect.Slice:\n\t\tif in.IsNil() {\n\t\t\treturn\n\t\t}\n\t\tif in.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\t// []byte is a scalar bytes field, not a repeated field.\n\t\t\t// Make a deep copy.\n\t\t\t// Append to []byte{} instead of []byte(nil) so that we never end up\n\t\t\t// with a nil 
result.\n\t\t\tout.SetBytes(append([]byte{}, in.Bytes()...))\n\t\t\treturn\n\t\t}\n\t\tn := in.Len()\n\t\tif out.IsNil() {\n\t\t\tout.Set(reflect.MakeSlice(in.Type(), 0, n))\n\t\t}\n\t\tswitch in.Type().Elem().Kind() {\n\t\tcase reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,\n\t\t\treflect.String, reflect.Uint32, reflect.Uint64:\n\t\t\tout.Set(reflect.AppendSlice(out, in))\n\t\tdefault:\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tx := reflect.Indirect(reflect.New(in.Type().Elem()))\n\t\t\t\tmergeAny(x, in.Index(i))\n\t\t\t\tout.Set(reflect.Append(out, x))\n\t\t\t}\n\t\t}\n\tcase reflect.Struct:\n\t\tmergeStruct(out, in)\n\tdefault:\n\t\t// unknown type, so not a protocol buffer\n\t\tlog.Printf(\"proto: don't know how to copy %v\", in)\n\t}\n}\n\nfunc mergeExtension(out, in map[int32]Extension) {\n\tfor extNum, eIn := range in {\n\t\teOut := Extension{desc: eIn.desc}\n\t\tif eIn.value != nil {\n\t\t\tv := reflect.New(reflect.TypeOf(eIn.value)).Elem()\n\t\t\tmergeAny(v, reflect.ValueOf(eIn.value))\n\t\t\teOut.value = v.Interface()\n\t\t}\n\t\tif eIn.enc != nil {\n\t\t\teOut.enc = make([]byte, len(eIn.enc))\n\t\t\tcopy(eOut.enc, eIn.enc)\n\t\t}\n\n\t\tout[extNum] = eOut\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/decode.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n/*\n * Routines for decoding protocol buffer data to construct in-memory representations.\n */\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n)\n\n// errOverflow is returned when an integer is too large to be represented.\nvar errOverflow = errors.New(\"proto: integer overflow\")\n\n// The fundamental decoders that interpret bytes on the wire.\n// Those that take integer types all return uint64 and are\n// therefore of type valueDecoder.\n\n// DecodeVarint reads a varint-encoded integer from the slice.\n// It returns the integer and the number of bytes consumed, or\n// zero if there is not enough.\n// This is the format for the\n// int32, int64, uint32, uint64, bool, and enum\n// protocol buffer types.\nfunc DecodeVarint(buf []byte) (x uint64, n int) {\n\t// x, n already 0\n\tfor shift := uint(0); shift < 64; shift += 7 {\n\t\tif n >= len(buf) {\n\t\t\treturn 0, 0\n\t\t}\n\t\tb := uint64(buf[n])\n\t\tn++\n\t\tx |= (b & 0x7F) << shift\n\t\tif (b & 0x80) == 0 {\n\t\t\treturn x, n\n\t\t}\n\t}\n\n\t// The number is too large to represent in a 64-bit value.\n\treturn 0, 0\n}\n\n// DecodeVarint reads a varint-encoded integer from the Buffer.\n// This is the format for the\n// int32, int64, uint32, uint64, bool, and enum\n// protocol buffer types.\nfunc (p *Buffer) DecodeVarint() (x uint64, err error) {\n\t// x, err already 0\n\n\ti := p.index\n\tl := len(p.buf)\n\n\tfor shift := uint(0); shift 
< 64; shift += 7 {\n\t\tif i >= l {\n\t\t\terr = io.ErrUnexpectedEOF\n\t\t\treturn\n\t\t}\n\t\tb := p.buf[i]\n\t\ti++\n\t\tx |= (uint64(b) & 0x7F) << shift\n\t\tif b < 0x80 {\n\t\t\tp.index = i\n\t\t\treturn\n\t\t}\n\t}\n\n\t// The number is too large to represent in a 64-bit value.\n\terr = errOverflow\n\treturn\n}\n\n// DecodeFixed64 reads a 64-bit integer from the Buffer.\n// This is the format for the\n// fixed64, sfixed64, and double protocol buffer types.\nfunc (p *Buffer) DecodeFixed64() (x uint64, err error) {\n\t// x, err already 0\n\ti := p.index + 8\n\tif i < 0 || i > len(p.buf) {\n\t\terr = io.ErrUnexpectedEOF\n\t\treturn\n\t}\n\tp.index = i\n\n\tx = uint64(p.buf[i-8])\n\tx |= uint64(p.buf[i-7]) << 8\n\tx |= uint64(p.buf[i-6]) << 16\n\tx |= uint64(p.buf[i-5]) << 24\n\tx |= uint64(p.buf[i-4]) << 32\n\tx |= uint64(p.buf[i-3]) << 40\n\tx |= uint64(p.buf[i-2]) << 48\n\tx |= uint64(p.buf[i-1]) << 56\n\treturn\n}\n\n// DecodeFixed32 reads a 32-bit integer from the Buffer.\n// This is the format for the\n// fixed32, sfixed32, and float protocol buffer types.\nfunc (p *Buffer) DecodeFixed32() (x uint64, err error) {\n\t// x, err already 0\n\ti := p.index + 4\n\tif i < 0 || i > len(p.buf) {\n\t\terr = io.ErrUnexpectedEOF\n\t\treturn\n\t}\n\tp.index = i\n\n\tx = uint64(p.buf[i-4])\n\tx |= uint64(p.buf[i-3]) << 8\n\tx |= uint64(p.buf[i-2]) << 16\n\tx |= uint64(p.buf[i-1]) << 24\n\treturn\n}\n\n// DecodeZigzag64 reads a zigzag-encoded 64-bit integer\n// from the Buffer.\n// This is the format used for the sint64 protocol buffer type.\nfunc (p *Buffer) DecodeZigzag64() (x uint64, err error) {\n\tx, err = p.DecodeVarint()\n\tif err != nil {\n\t\treturn\n\t}\n\tx = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)\n\treturn\n}\n\n// DecodeZigzag32 reads a zigzag-encoded 32-bit integer\n// from  the Buffer.\n// This is the format used for the sint32 protocol buffer type.\nfunc (p *Buffer) DecodeZigzag32() (x uint64, err error) {\n\tx, err = p.DecodeVarint()\n\tif err != nil 
{\n\t\treturn\n\t}\n\tx = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))\n\treturn\n}\n\n// These are not ValueDecoders: they produce an array of bytes or a string.\n// bytes, embedded messages\n\n// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.\n// This is the format used for the bytes protocol buffer\n// type and for embedded messages.\nfunc (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {\n\tn, err := p.DecodeVarint()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnb := int(n)\n\tif nb < 0 {\n\t\treturn nil, fmt.Errorf(\"proto: bad byte length %d\", nb)\n\t}\n\tend := p.index + nb\n\tif end < p.index || end > len(p.buf) {\n\t\treturn nil, io.ErrUnexpectedEOF\n\t}\n\n\tif !alloc {\n\t\t// todo: check if can get more uses of alloc=false\n\t\tbuf = p.buf[p.index:end]\n\t\tp.index += nb\n\t\treturn\n\t}\n\n\tbuf = make([]byte, nb)\n\tcopy(buf, p.buf[p.index:])\n\tp.index += nb\n\treturn\n}\n\n// DecodeStringBytes reads an encoded string from the Buffer.\n// This is the format used for the proto2 string type.\nfunc (p *Buffer) DecodeStringBytes() (s string, err error) {\n\tbuf, err := p.DecodeRawBytes(false)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn string(buf), nil\n}\n\n// Skip the next item in the buffer. 
Its wire type is decoded and presented as an argument.\n// If the protocol buffer has extensions, and the field matches, add it as an extension.\n// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.\nfunc (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {\n\toi := o.index\n\n\terr := o.skip(t, tag, wire)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !unrecField.IsValid() {\n\t\treturn nil\n\t}\n\n\tptr := structPointer_Bytes(base, unrecField)\n\n\t// Add the skipped field to struct field\n\tobuf := o.buf\n\n\to.buf = *ptr\n\to.EncodeVarint(uint64(tag<<3 | wire))\n\t*ptr = append(o.buf, obuf[oi:o.index]...)\n\n\to.buf = obuf\n\n\treturn nil\n}\n\n// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.\nfunc (o *Buffer) skip(t reflect.Type, tag, wire int) error {\n\n\tvar u uint64\n\tvar err error\n\n\tswitch wire {\n\tcase WireVarint:\n\t\t_, err = o.DecodeVarint()\n\tcase WireFixed64:\n\t\t_, err = o.DecodeFixed64()\n\tcase WireBytes:\n\t\t_, err = o.DecodeRawBytes(false)\n\tcase WireFixed32:\n\t\t_, err = o.DecodeFixed32()\n\tcase WireStartGroup:\n\t\tfor {\n\t\t\tu, err = o.DecodeVarint()\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfwire := int(u & 0x7)\n\t\t\tif fwire == WireEndGroup {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tftag := int(u >> 3)\n\t\t\terr = o.skip(t, ftag, fwire)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\tdefault:\n\t\terr = fmt.Errorf(\"proto: can't skip unknown wire type %d for %s\", wire, t)\n\t}\n\treturn err\n}\n\n// Unmarshaler is the interface representing objects that can\n// unmarshal themselves.  The method should reset the receiver before\n// decoding starts.  
The argument points to data that may be\n// overwritten, so implementations should not keep references to the\n// buffer.\ntype Unmarshaler interface {\n\tUnmarshal([]byte) error\n}\n\n// Unmarshal parses the protocol buffer representation in buf and places the\n// decoded result in pb.  If the struct underlying pb does not match\n// the data in buf, the results can be unpredictable.\n//\n// Unmarshal resets pb before starting to unmarshal, so any\n// existing data in pb is always removed. Use UnmarshalMerge\n// to preserve and append to existing data.\nfunc Unmarshal(buf []byte, pb Message) error {\n\tpb.Reset()\n\treturn UnmarshalMerge(buf, pb)\n}\n\n// UnmarshalMerge parses the protocol buffer representation in buf and\n// writes the decoded result to pb.  If the struct underlying pb does not match\n// the data in buf, the results can be unpredictable.\n//\n// UnmarshalMerge merges into existing data in pb.\n// Most code should use Unmarshal instead.\nfunc UnmarshalMerge(buf []byte, pb Message) error {\n\t// If the object can unmarshal itself, let it.\n\tif u, ok := pb.(Unmarshaler); ok {\n\t\treturn u.Unmarshal(buf)\n\t}\n\treturn NewBuffer(buf).Unmarshal(pb)\n}\n\n// Unmarshal parses the protocol buffer representation in the\n// Buffer and places the decoded result in pb.  
If the struct\n// underlying pb does not match the data in the buffer, the results can be\n// unpredictable.\nfunc (p *Buffer) Unmarshal(pb Message) error {\n\t// If the object can unmarshal itself, let it.\n\tif u, ok := pb.(Unmarshaler); ok {\n\t\terr := u.Unmarshal(p.buf[p.index:])\n\t\tp.index = len(p.buf)\n\t\treturn err\n\t}\n\n\ttyp, base, err := getbase(pb)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)\n\n\tif collectStats {\n\t\tstats.Decode++\n\t}\n\n\treturn err\n}\n\n// unmarshalType does the work of unmarshaling a structure.\nfunc (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {\n\tvar state errorState\n\trequired, reqFields := prop.reqCount, uint64(0)\n\n\tvar err error\n\tfor err == nil && o.index < len(o.buf) {\n\t\toi := o.index\n\t\tvar u uint64\n\t\tu, err = o.DecodeVarint()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\twire := int(u & 0x7)\n\t\tif wire == WireEndGroup {\n\t\t\tif is_group {\n\t\t\t\treturn nil // input is satisfied\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"proto: %s: wiretype end group for non-group\", st)\n\t\t}\n\t\ttag := int(u >> 3)\n\t\tif tag <= 0 {\n\t\t\treturn fmt.Errorf(\"proto: %s: illegal tag %d (wire type %d)\", st, tag, wire)\n\t\t}\n\t\tfieldnum, ok := prop.decoderTags.get(tag)\n\t\tif !ok {\n\t\t\t// Maybe it's an extension?\n\t\t\tif prop.extendable {\n\t\t\t\tif e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {\n\t\t\t\t\tif err = o.skip(st, tag, wire); err == nil {\n\t\t\t\t\t\text := e.ExtensionMap()[int32(tag)] // may be missing\n\t\t\t\t\t\text.enc = append(ext.enc, o.buf[oi:o.index]...)\n\t\t\t\t\t\te.ExtensionMap()[int32(tag)] = ext\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\terr = o.skipAndSave(st, tag, wire, base, prop.unrecField)\n\t\t\tcontinue\n\t\t}\n\t\tp := prop.Prop[fieldnum]\n\n\t\tif p.dec == nil 
{\n\t\t\tfmt.Fprintf(os.Stderr, \"proto: no protobuf decoder for %s.%s\\n\", st, st.Field(fieldnum).Name)\n\t\t\tcontinue\n\t\t}\n\t\tdec := p.dec\n\t\tif wire != WireStartGroup && wire != p.WireType {\n\t\t\tif wire == WireBytes && p.packedDec != nil {\n\t\t\t\t// a packable field\n\t\t\t\tdec = p.packedDec\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"proto: bad wiretype for field %s.%s: got wiretype %d, want %d\", st, st.Field(fieldnum).Name, wire, p.WireType)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tdecErr := dec(o, p, base)\n\t\tif decErr != nil && !state.shouldContinue(decErr, p) {\n\t\t\terr = decErr\n\t\t}\n\t\tif err == nil && p.Required {\n\t\t\t// Successfully decoded a required field.\n\t\t\tif tag <= 64 {\n\t\t\t\t// use bitmap for fields 1-64 to catch field reuse.\n\t\t\t\tvar mask uint64 = 1 << uint64(tag-1)\n\t\t\t\tif reqFields&mask == 0 {\n\t\t\t\t\t// new required field\n\t\t\t\t\treqFields |= mask\n\t\t\t\t\trequired--\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// This is imprecise. It can be fooled by a required field\n\t\t\t\t// with a tag > 64 that is encoded twice; that's very rare.\n\t\t\t\t// A fully correct implementation would require allocating\n\t\t\t\t// a data structure, which we would like to avoid.\n\t\t\t\trequired--\n\t\t\t}\n\t\t}\n\t}\n\tif err == nil {\n\t\tif is_group {\n\t\t\treturn io.ErrUnexpectedEOF\n\t\t}\n\t\tif state.err != nil {\n\t\t\treturn state.err\n\t\t}\n\t\tif required > 0 {\n\t\t\t// Not enough information to determine the exact field. 
If we use extra\n\t\t\t// CPU, we could determine the field only if the missing required field\n\t\t\t// has a tag <= 64 and we check reqFields.\n\t\t\treturn &RequiredNotSetError{\"{Unknown}\"}\n\t\t}\n\t}\n\treturn err\n}\n\n// Individual type decoders\n// For each,\n//\tu is the decoded value,\n//\tv is a pointer to the field (pointer) in the struct\n\n// Sizes of the pools to allocate inside the Buffer.\n// The goal is modest amortization and allocation\n// on at least 16-byte boundaries.\nconst (\n\tboolPoolSize   = 16\n\tuint32PoolSize = 8\n\tuint64PoolSize = 4\n)\n\n// Decode a bool.\nfunc (o *Buffer) dec_bool(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(o.bools) == 0 {\n\t\to.bools = make([]bool, boolPoolSize)\n\t}\n\to.bools[0] = u != 0\n\t*structPointer_Bool(base, p.field) = &o.bools[0]\n\to.bools = o.bools[1:]\n\treturn nil\n}\n\nfunc (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*structPointer_BoolVal(base, p.field) = u != 0\n\treturn nil\n}\n\n// Decode an int32.\nfunc (o *Buffer) dec_int32(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tword32_Set(structPointer_Word32(base, p.field), o, uint32(u))\n\treturn nil\n}\n\nfunc (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tword32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))\n\treturn nil\n}\n\n// Decode an int64.\nfunc (o *Buffer) dec_int64(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tword64_Set(structPointer_Word64(base, p.field), o, u)\n\treturn nil\n}\n\nfunc (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tword64Val_Set(structPointer_Word64Val(base, p.field), o, u)\n\treturn nil\n}\n\n// Decode a string.\nfunc (o *Buffer) dec_string(p *Properties, base structPointer) error {\n\ts, err := o.DecodeStringBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\t*structPointer_String(base, p.field) = &s\n\treturn nil\n}\n\nfunc (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {\n\ts, err := o.DecodeStringBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\t*structPointer_StringVal(base, p.field) = s\n\treturn nil\n}\n\n// Decode a slice of bytes ([]byte).\nfunc (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {\n\tb, err := o.DecodeRawBytes(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*structPointer_Bytes(base, p.field) = b\n\treturn nil\n}\n\n// Decode a slice of bools ([]bool).\nfunc (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := structPointer_BoolSlice(base, p.field)\n\t*v = append(*v, u != 0)\n\treturn nil\n}\n\n// Decode a slice of bools ([]bool) in packed format.\nfunc (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {\n\tv := structPointer_BoolSlice(base, p.field)\n\n\tnn, err := o.DecodeVarint()\n\tif err != nil {\n\t\treturn err\n\t}\n\tnb := int(nn) // number of bytes of encoded bools\n\n\ty := *v\n\tfor i := 0; i < nb; i++ {\n\t\tu, err := p.valDec(o)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ty = append(y, u != 0)\n\t}\n\n\t*v = y\n\treturn nil\n}\n\n// Decode a slice of int32s ([]int32).\nfunc (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tstructPointer_Word32Slice(base, p.field).Append(uint32(u))\n\treturn nil\n}\n\n// Decode a slice of int32s ([]int32) in packed format.\nfunc (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {\n\tv := structPointer_Word32Slice(base, 
p.field)\n\n\tnn, err := o.DecodeVarint()\n\tif err != nil {\n\t\treturn err\n\t}\n\tnb := int(nn) // number of bytes of encoded int32s\n\n\tfin := o.index + nb\n\tif fin < o.index {\n\t\treturn errOverflow\n\t}\n\tfor o.index < fin {\n\t\tu, err := p.valDec(o)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tv.Append(uint32(u))\n\t}\n\treturn nil\n}\n\n// Decode a slice of int64s ([]int64).\nfunc (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstructPointer_Word64Slice(base, p.field).Append(u)\n\treturn nil\n}\n\n// Decode a slice of int64s ([]int64) in packed format.\nfunc (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {\n\tv := structPointer_Word64Slice(base, p.field)\n\n\tnn, err := o.DecodeVarint()\n\tif err != nil {\n\t\treturn err\n\t}\n\tnb := int(nn) // number of bytes of encoded int64s\n\n\tfin := o.index + nb\n\tif fin < o.index {\n\t\treturn errOverflow\n\t}\n\tfor o.index < fin {\n\t\tu, err := p.valDec(o)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tv.Append(u)\n\t}\n\treturn nil\n}\n\n// Decode a slice of strings ([]string).\nfunc (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {\n\ts, err := o.DecodeStringBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := structPointer_StringSlice(base, p.field)\n\t*v = append(*v, s)\n\treturn nil\n}\n\n// Decode a slice of slice of bytes ([][]byte).\nfunc (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {\n\tb, err := o.DecodeRawBytes(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := structPointer_BytesSlice(base, p.field)\n\t*v = append(*v, b)\n\treturn nil\n}\n\n// Decode a map field.\nfunc (o *Buffer) dec_new_map(p *Properties, base structPointer) error {\n\traw, err := o.DecodeRawBytes(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\toi := o.index       // index at the end of this map entry\n\to.index -= len(raw) // move buffer 
back to start of map entry\n\n\tmptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V\n\tif mptr.Elem().IsNil() {\n\t\tmptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))\n\t}\n\tv := mptr.Elem() // map[K]V\n\n\t// Prepare addressable doubly-indirect placeholders for the key and value types.\n\t// See enc_new_map for why.\n\tkeyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K\n\tkeybase := toStructPointer(keyptr.Addr())                  // **K\n\n\tvar valbase structPointer\n\tvar valptr reflect.Value\n\tswitch p.mtype.Elem().Kind() {\n\tcase reflect.Slice:\n\t\t// []byte\n\t\tvar dummy []byte\n\t\tvalptr = reflect.ValueOf(&dummy)  // *[]byte\n\t\tvalbase = toStructPointer(valptr) // *[]byte\n\tcase reflect.Ptr:\n\t\t// message; valptr is **Msg; need to allocate the intermediate pointer\n\t\tvalptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V\n\t\tvalptr.Set(reflect.New(valptr.Type().Elem()))\n\t\tvalbase = toStructPointer(valptr)\n\tdefault:\n\t\t// everything else\n\t\tvalptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V\n\t\tvalbase = toStructPointer(valptr.Addr())                   // **V\n\t}\n\n\t// Decode.\n\t// This parses a restricted wire format, namely the encoding of a message\n\t// with two fields. 
See enc_new_map for the format.\n\tfor o.index < oi {\n\t\t// tagcode for key and value properties are always a single byte\n\t\t// because they have tags 1 and 2.\n\t\ttagcode := o.buf[o.index]\n\t\to.index++\n\t\tswitch tagcode {\n\t\tcase p.mkeyprop.tagcode[0]:\n\t\t\tif err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase p.mvalprop.tagcode[0]:\n\t\t\tif err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\t// TODO: Should we silently skip this instead?\n\t\t\treturn fmt.Errorf(\"proto: bad map data tag %d\", raw[0])\n\t\t}\n\t}\n\tkeyelem, valelem := keyptr.Elem(), valptr.Elem()\n\tif !keyelem.IsValid() || !valelem.IsValid() {\n\t\t// We did not decode the key or the value in the map entry.\n\t\t// Either way, it's an invalid map entry.\n\t\treturn fmt.Errorf(\"proto: bad map data: missing key/val\")\n\t}\n\n\tv.SetMapIndex(keyelem, valelem)\n\treturn nil\n}\n\n// Decode a group.\nfunc (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {\n\tbas := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(bas) {\n\t\t// allocate new nested message\n\t\tbas = toStructPointer(reflect.New(p.stype))\n\t\tstructPointer_SetStructPointer(base, p.field, bas)\n\t}\n\treturn o.unmarshalType(p.stype, p.sprop, true, bas)\n}\n\n// Decode an embedded message.\nfunc (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {\n\traw, e := o.DecodeRawBytes(false)\n\tif e != nil {\n\t\treturn e\n\t}\n\n\tbas := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(bas) {\n\t\t// allocate new nested message\n\t\tbas = toStructPointer(reflect.New(p.stype))\n\t\tstructPointer_SetStructPointer(base, p.field, bas)\n\t}\n\n\t// If the object can unmarshal itself, let it.\n\tif p.isUnmarshaler {\n\t\tiv := structPointer_Interface(bas, p.stype)\n\t\treturn iv.(Unmarshaler).Unmarshal(raw)\n\t}\n\n\tobuf := o.buf\n\toi 
:= o.index\n\to.buf = raw\n\to.index = 0\n\n\terr = o.unmarshalType(p.stype, p.sprop, false, bas)\n\to.buf = obuf\n\to.index = oi\n\n\treturn err\n}\n\n// Decode a slice of embedded messages.\nfunc (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {\n\treturn o.dec_slice_struct(p, false, base)\n}\n\n// Decode a slice of embedded groups.\nfunc (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {\n\treturn o.dec_slice_struct(p, true, base)\n}\n\n// Decode a slice of structs ([]*struct).\nfunc (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {\n\tv := reflect.New(p.stype)\n\tbas := toStructPointer(v)\n\tstructPointer_StructPointerSlice(base, p.field).Append(bas)\n\n\tif is_group {\n\t\terr := o.unmarshalType(p.stype, p.sprop, is_group, bas)\n\t\treturn err\n\t}\n\n\traw, err := o.DecodeRawBytes(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If the object can unmarshal itself, let it.\n\tif p.isUnmarshaler {\n\t\tiv := v.Interface()\n\t\treturn iv.(Unmarshaler).Unmarshal(raw)\n\t}\n\n\tobuf := o.buf\n\toi := o.index\n\to.buf = raw\n\to.index = 0\n\n\terr = o.unmarshalType(p.stype, p.sprop, is_group, bas)\n\n\to.buf = obuf\n\to.index = oi\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/encode.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n/*\n * Routines for encoding data into the wire format for protocol buffers.\n */\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"sort\"\n)\n\n// RequiredNotSetError is the error returned if Marshal is called with\n// a protocol buffer struct whose required fields have not\n// all been initialized. It is also the error returned if Unmarshal is\n// called with an encoded protocol buffer that does not include all the\n// required fields.\n//\n// When printed, RequiredNotSetError reports the first unset required field in a\n// message. 
If the field cannot be precisely determined, it is reported as\n// \"{Unknown}\".\ntype RequiredNotSetError struct {\n\tfield string\n}\n\nfunc (e *RequiredNotSetError) Error() string {\n\treturn fmt.Sprintf(\"proto: required field %q not set\", e.field)\n}\n\nvar (\n\t// errRepeatedHasNil is the error returned if Marshal is called with\n\t// a struct with a repeated field containing a nil element.\n\terrRepeatedHasNil = errors.New(\"proto: repeated field has nil element\")\n\n\t// ErrNil is the error returned if Marshal is called with nil.\n\tErrNil = errors.New(\"proto: Marshal called with nil\")\n)\n\n// The fundamental encoders that put bytes on the wire.\n// Those that take integer types all accept uint64 and are\n// therefore of type valueEncoder.\n\nconst maxVarintBytes = 10 // maximum length of a varint\n\n// EncodeVarint returns the varint encoding of x.\n// This is the format for the\n// int32, int64, uint32, uint64, bool, and enum\n// protocol buffer types.\n// Not used by the package itself, but helpful to clients\n// wishing to use the same encoding.\nfunc EncodeVarint(x uint64) []byte {\n\tvar buf [maxVarintBytes]byte\n\tvar n int\n\tfor n = 0; x > 127; n++ {\n\t\tbuf[n] = 0x80 | uint8(x&0x7F)\n\t\tx >>= 7\n\t}\n\tbuf[n] = uint8(x)\n\tn++\n\treturn buf[0:n]\n}\n\n// EncodeVarint writes a varint-encoded integer to the Buffer.\n// This is the format for the\n// int32, int64, uint32, uint64, bool, and enum\n// protocol buffer types.\nfunc (p *Buffer) EncodeVarint(x uint64) error {\n\tfor x >= 1<<7 {\n\t\tp.buf = append(p.buf, uint8(x&0x7f|0x80))\n\t\tx >>= 7\n\t}\n\tp.buf = append(p.buf, uint8(x))\n\treturn nil\n}\n\nfunc sizeVarint(x uint64) (n int) {\n\tfor {\n\t\tn++\n\t\tx >>= 7\n\t\tif x == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn n\n}\n\n// EncodeFixed64 writes a 64-bit integer to the Buffer.\n// This is the format for the\n// fixed64, sfixed64, and double protocol buffer types.\nfunc (p *Buffer) EncodeFixed64(x uint64) error {\n\tp.buf = 
append(p.buf,\n\t\tuint8(x),\n\t\tuint8(x>>8),\n\t\tuint8(x>>16),\n\t\tuint8(x>>24),\n\t\tuint8(x>>32),\n\t\tuint8(x>>40),\n\t\tuint8(x>>48),\n\t\tuint8(x>>56))\n\treturn nil\n}\n\nfunc sizeFixed64(x uint64) int {\n\treturn 8\n}\n\n// EncodeFixed32 writes a 32-bit integer to the Buffer.\n// This is the format for the\n// fixed32, sfixed32, and float protocol buffer types.\nfunc (p *Buffer) EncodeFixed32(x uint64) error {\n\tp.buf = append(p.buf,\n\t\tuint8(x),\n\t\tuint8(x>>8),\n\t\tuint8(x>>16),\n\t\tuint8(x>>24))\n\treturn nil\n}\n\nfunc sizeFixed32(x uint64) int {\n\treturn 4\n}\n\n// EncodeZigzag64 writes a zigzag-encoded 64-bit integer\n// to the Buffer.\n// This is the format used for the sint64 protocol buffer type.\nfunc (p *Buffer) EncodeZigzag64(x uint64) error {\n\t// use signed number to get arithmetic right shift.\n\treturn p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))\n}\n\nfunc sizeZigzag64(x uint64) int {\n\treturn sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))\n}\n\n// EncodeZigzag32 writes a zigzag-encoded 32-bit integer\n// to the Buffer.\n// This is the format used for the sint32 protocol buffer type.\nfunc (p *Buffer) EncodeZigzag32(x uint64) error {\n\t// use signed number to get arithmetic right shift.\n\treturn p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))\n}\n\nfunc sizeZigzag32(x uint64) int {\n\treturn sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))\n}\n\n// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.\n// This is the format used for the bytes protocol buffer\n// type and for embedded messages.\nfunc (p *Buffer) EncodeRawBytes(b []byte) error {\n\tp.EncodeVarint(uint64(len(b)))\n\tp.buf = append(p.buf, b...)\n\treturn nil\n}\n\nfunc sizeRawBytes(b []byte) int {\n\treturn sizeVarint(uint64(len(b))) +\n\t\tlen(b)\n}\n\n// EncodeStringBytes writes an encoded string to the Buffer.\n// This is the format used for the proto2 string type.\nfunc (p *Buffer) 
EncodeStringBytes(s string) error {\n\tp.EncodeVarint(uint64(len(s)))\n\tp.buf = append(p.buf, s...)\n\treturn nil\n}\n\nfunc sizeStringBytes(s string) int {\n\treturn sizeVarint(uint64(len(s))) +\n\t\tlen(s)\n}\n\n// Marshaler is the interface representing objects that can marshal themselves.\ntype Marshaler interface {\n\tMarshal() ([]byte, error)\n}\n\n// Marshal takes the protocol buffer\n// and encodes it into the wire format, returning the data.\nfunc Marshal(pb Message) ([]byte, error) {\n\t// Can the object marshal itself?\n\tif m, ok := pb.(Marshaler); ok {\n\t\treturn m.Marshal()\n\t}\n\tp := NewBuffer(nil)\n\terr := p.Marshal(pb)\n\tvar state errorState\n\tif err != nil && !state.shouldContinue(err, nil) {\n\t\treturn nil, err\n\t}\n\tif p.buf == nil && err == nil {\n\t\t// Return a non-nil slice on success.\n\t\treturn []byte{}, nil\n\t}\n\treturn p.buf, err\n}\n\n// Marshal takes the protocol buffer\n// and encodes it into the wire format, writing the result to the\n// Buffer.\nfunc (p *Buffer) Marshal(pb Message) error {\n\t// Can the object marshal itself?\n\tif m, ok := pb.(Marshaler); ok {\n\t\tdata, err := m.Marshal()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.buf = append(p.buf, data...)\n\t\treturn nil\n\t}\n\n\tt, base, err := getbase(pb)\n\tif structPointer_IsNil(base) {\n\t\treturn ErrNil\n\t}\n\tif err == nil {\n\t\terr = p.enc_struct(GetProperties(t.Elem()), base)\n\t}\n\n\tif collectStats {\n\t\tstats.Encode++\n\t}\n\n\treturn err\n}\n\n// Size returns the encoded size of a protocol buffer.\nfunc Size(pb Message) (n int) {\n\t// Can the object marshal itself?  
If so, Size is slow.\n\t// TODO: add Size to Marshaler, or add a Sizer interface.\n\tif m, ok := pb.(Marshaler); ok {\n\t\tb, _ := m.Marshal()\n\t\treturn len(b)\n\t}\n\n\tt, base, err := getbase(pb)\n\tif structPointer_IsNil(base) {\n\t\treturn 0\n\t}\n\tif err == nil {\n\t\tn = size_struct(GetProperties(t.Elem()), base)\n\t}\n\n\tif collectStats {\n\t\tstats.Size++\n\t}\n\n\treturn\n}\n\n// Individual type encoders.\n\n// Encode a bool.\nfunc (o *Buffer) enc_bool(p *Properties, base structPointer) error {\n\tv := *structPointer_Bool(base, p.field)\n\tif v == nil {\n\t\treturn ErrNil\n\t}\n\tx := 0\n\tif *v {\n\t\tx = 1\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, uint64(x))\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {\n\tv := *structPointer_BoolVal(base, p.field)\n\tif !v {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, 1)\n\treturn nil\n}\n\nfunc size_bool(p *Properties, base structPointer) int {\n\tv := *structPointer_Bool(base, p.field)\n\tif v == nil {\n\t\treturn 0\n\t}\n\treturn len(p.tagcode) + 1 // each bool takes exactly one byte\n}\n\nfunc size_proto3_bool(p *Properties, base structPointer) int {\n\tv := *structPointer_BoolVal(base, p.field)\n\tif !v {\n\t\treturn 0\n\t}\n\treturn len(p.tagcode) + 1 // each bool takes exactly one byte\n}\n\n// Encode an int32.\nfunc (o *Buffer) enc_int32(p *Properties, base structPointer) error {\n\tv := structPointer_Word32(base, p.field)\n\tif word32_IsNil(v) {\n\t\treturn ErrNil\n\t}\n\tx := int32(word32_Get(v)) // permit sign extension to use full 64-bit range\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, uint64(x))\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {\n\tv := structPointer_Word32Val(base, p.field)\n\tx := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range\n\tif x == 0 {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, 
p.tagcode...)\n\tp.valEnc(o, uint64(x))\n\treturn nil\n}\n\nfunc size_int32(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word32(base, p.field)\n\tif word32_IsNil(v) {\n\t\treturn 0\n\t}\n\tx := int32(word32_Get(v)) // permit sign extension to use full 64-bit range\n\tn += len(p.tagcode)\n\tn += p.valSize(uint64(x))\n\treturn\n}\n\nfunc size_proto3_int32(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word32Val(base, p.field)\n\tx := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range\n\tif x == 0 {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += p.valSize(uint64(x))\n\treturn\n}\n\n// Encode a uint32.\n// Exactly the same as int32, except for no sign extension.\nfunc (o *Buffer) enc_uint32(p *Properties, base structPointer) error {\n\tv := structPointer_Word32(base, p.field)\n\tif word32_IsNil(v) {\n\t\treturn ErrNil\n\t}\n\tx := word32_Get(v)\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, uint64(x))\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {\n\tv := structPointer_Word32Val(base, p.field)\n\tx := word32Val_Get(v)\n\tif x == 0 {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, uint64(x))\n\treturn nil\n}\n\nfunc size_uint32(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word32(base, p.field)\n\tif word32_IsNil(v) {\n\t\treturn 0\n\t}\n\tx := word32_Get(v)\n\tn += len(p.tagcode)\n\tn += p.valSize(uint64(x))\n\treturn\n}\n\nfunc size_proto3_uint32(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word32Val(base, p.field)\n\tx := word32Val_Get(v)\n\tif x == 0 {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += p.valSize(uint64(x))\n\treturn\n}\n\n// Encode an int64.\nfunc (o *Buffer) enc_int64(p *Properties, base structPointer) error {\n\tv := structPointer_Word64(base, p.field)\n\tif word64_IsNil(v) {\n\t\treturn ErrNil\n\t}\n\tx := word64_Get(v)\n\to.buf = append(o.buf, 
p.tagcode...)\n\tp.valEnc(o, x)\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {\n\tv := structPointer_Word64Val(base, p.field)\n\tx := word64Val_Get(v)\n\tif x == 0 {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, x)\n\treturn nil\n}\n\nfunc size_int64(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word64(base, p.field)\n\tif word64_IsNil(v) {\n\t\treturn 0\n\t}\n\tx := word64_Get(v)\n\tn += len(p.tagcode)\n\tn += p.valSize(x)\n\treturn\n}\n\nfunc size_proto3_int64(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word64Val(base, p.field)\n\tx := word64Val_Get(v)\n\tif x == 0 {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += p.valSize(x)\n\treturn\n}\n\n// Encode a string.\nfunc (o *Buffer) enc_string(p *Properties, base structPointer) error {\n\tv := *structPointer_String(base, p.field)\n\tif v == nil {\n\t\treturn ErrNil\n\t}\n\tx := *v\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeStringBytes(x)\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {\n\tv := *structPointer_StringVal(base, p.field)\n\tif v == \"\" {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeStringBytes(v)\n\treturn nil\n}\n\nfunc size_string(p *Properties, base structPointer) (n int) {\n\tv := *structPointer_String(base, p.field)\n\tif v == nil {\n\t\treturn 0\n\t}\n\tx := *v\n\tn += len(p.tagcode)\n\tn += sizeStringBytes(x)\n\treturn\n}\n\nfunc size_proto3_string(p *Properties, base structPointer) (n int) {\n\tv := *structPointer_StringVal(base, p.field)\n\tif v == \"\" {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += sizeStringBytes(v)\n\treturn\n}\n\n// All protocol buffer fields are nillable, but be careful.\nfunc isNil(v reflect.Value) bool {\n\tswitch v.Kind() {\n\tcase reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:\n\t\treturn v.IsNil()\n\t}\n\treturn false\n}\n\n// Encode a 
message struct.\nfunc (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {\n\tvar state errorState\n\tstructp := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(structp) {\n\t\treturn ErrNil\n\t}\n\n\t// Can the object marshal itself?\n\tif p.isMarshaler {\n\t\tm := structPointer_Interface(structp, p.stype).(Marshaler)\n\t\tdata, err := m.Marshal()\n\t\tif err != nil && !state.shouldContinue(err, nil) {\n\t\t\treturn err\n\t\t}\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\to.EncodeRawBytes(data)\n\t\treturn nil\n\t}\n\n\to.buf = append(o.buf, p.tagcode...)\n\treturn o.enc_len_struct(p.sprop, structp, &state)\n}\n\nfunc size_struct_message(p *Properties, base structPointer) int {\n\tstructp := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(structp) {\n\t\treturn 0\n\t}\n\n\t// Can the object marshal itself?\n\tif p.isMarshaler {\n\t\tm := structPointer_Interface(structp, p.stype).(Marshaler)\n\t\tdata, _ := m.Marshal()\n\t\tn0 := len(p.tagcode)\n\t\tn1 := sizeRawBytes(data)\n\t\treturn n0 + n1\n\t}\n\n\tn0 := len(p.tagcode)\n\tn1 := size_struct(p.sprop, structp)\n\tn2 := sizeVarint(uint64(n1)) // size of encoded length\n\treturn n0 + n1 + n2\n}\n\n// Encode a group struct.\nfunc (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {\n\tvar state errorState\n\tb := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(b) {\n\t\treturn ErrNil\n\t}\n\n\to.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))\n\terr := o.enc_struct(p.sprop, b)\n\tif err != nil && !state.shouldContinue(err, nil) {\n\t\treturn err\n\t}\n\to.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))\n\treturn state.err\n}\n\nfunc size_struct_group(p *Properties, base structPointer) (n int) {\n\tb := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(b) {\n\t\treturn 0\n\t}\n\n\tn += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))\n\tn += size_struct(p.sprop, b)\n\tn += 
sizeVarint(uint64((p.Tag << 3) | WireEndGroup))\n\treturn\n}\n\n// Encode a slice of bools ([]bool).\nfunc (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {\n\ts := *structPointer_BoolSlice(base, p.field)\n\tl := len(s)\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\tfor _, x := range s {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\tv := uint64(0)\n\t\tif x {\n\t\t\tv = 1\n\t\t}\n\t\tp.valEnc(o, v)\n\t}\n\treturn nil\n}\n\nfunc size_slice_bool(p *Properties, base structPointer) int {\n\ts := *structPointer_BoolSlice(base, p.field)\n\tl := len(s)\n\tif l == 0 {\n\t\treturn 0\n\t}\n\treturn l * (len(p.tagcode) + 1) // each bool takes exactly one byte\n}\n\n// Encode a slice of bools ([]bool) in packed format.\nfunc (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {\n\ts := *structPointer_BoolSlice(base, p.field)\n\tl := len(s)\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeVarint(uint64(l)) // each bool takes exactly one byte\n\tfor _, x := range s {\n\t\tv := uint64(0)\n\t\tif x {\n\t\t\tv = 1\n\t\t}\n\t\tp.valEnc(o, v)\n\t}\n\treturn nil\n}\n\nfunc size_slice_packed_bool(p *Properties, base structPointer) (n int) {\n\ts := *structPointer_BoolSlice(base, p.field)\n\tl := len(s)\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += sizeVarint(uint64(l))\n\tn += l // each bool takes exactly one byte\n\treturn\n}\n\n// Encode a slice of bytes ([]byte).\nfunc (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {\n\ts := *structPointer_Bytes(base, p.field)\n\tif s == nil {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeRawBytes(s)\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {\n\ts := *structPointer_Bytes(base, p.field)\n\tif len(s) == 0 {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeRawBytes(s)\n\treturn nil\n}\n\nfunc size_slice_byte(p *Properties, 
base structPointer) (n int) {\n\ts := *structPointer_Bytes(base, p.field)\n\tif s == nil {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += sizeRawBytes(s)\n\treturn\n}\n\nfunc size_proto3_slice_byte(p *Properties, base structPointer) (n int) {\n\ts := *structPointer_Bytes(base, p.field)\n\tif len(s) == 0 {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += sizeRawBytes(s)\n\treturn\n}\n\n// Encode a slice of int32s ([]int32).\nfunc (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\tfor i := 0; i < l; i++ {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\tx := int32(s.Index(i)) // permit sign extension to use full 64-bit range\n\t\tp.valEnc(o, uint64(x))\n\t}\n\treturn nil\n}\n\nfunc size_slice_int32(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tfor i := 0; i < l; i++ {\n\t\tn += len(p.tagcode)\n\t\tx := int32(s.Index(i)) // permit sign extension to use full 64-bit range\n\t\tn += p.valSize(uint64(x))\n\t}\n\treturn\n}\n\n// Encode a slice of int32s ([]int32) in packed format.\nfunc (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\t// TODO: Reuse a Buffer.\n\tbuf := NewBuffer(nil)\n\tfor i := 0; i < l; i++ {\n\t\tx := int32(s.Index(i)) // permit sign extension to use full 64-bit range\n\t\tp.valEnc(buf, uint64(x))\n\t}\n\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeVarint(uint64(len(buf.buf)))\n\to.buf = append(o.buf, buf.buf...)\n\treturn nil\n}\n\nfunc size_slice_packed_int32(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tvar bufSize int\n\tfor i := 0; i < l; i++ {\n\t\tx := int32(s.Index(i)) // permit sign 
extension to use full 64-bit range\n\t\tbufSize += p.valSize(uint64(x))\n\t}\n\n\tn += len(p.tagcode)\n\tn += sizeVarint(uint64(bufSize))\n\tn += bufSize\n\treturn\n}\n\n// Encode a slice of uint32s ([]uint32).\n// Exactly the same as int32, except for no sign extension.\nfunc (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\tfor i := 0; i < l; i++ {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\tx := s.Index(i)\n\t\tp.valEnc(o, uint64(x))\n\t}\n\treturn nil\n}\n\nfunc size_slice_uint32(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tfor i := 0; i < l; i++ {\n\t\tn += len(p.tagcode)\n\t\tx := s.Index(i)\n\t\tn += p.valSize(uint64(x))\n\t}\n\treturn\n}\n\n// Encode a slice of uint32s ([]uint32) in packed format.\n// Exactly the same as int32, except for no sign extension.\nfunc (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\t// TODO: Reuse a Buffer.\n\tbuf := NewBuffer(nil)\n\tfor i := 0; i < l; i++ {\n\t\tp.valEnc(buf, uint64(s.Index(i)))\n\t}\n\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeVarint(uint64(len(buf.buf)))\n\to.buf = append(o.buf, buf.buf...)\n\treturn nil\n}\n\nfunc size_slice_packed_uint32(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tvar bufSize int\n\tfor i := 0; i < l; i++ {\n\t\tbufSize += p.valSize(uint64(s.Index(i)))\n\t}\n\n\tn += len(p.tagcode)\n\tn += sizeVarint(uint64(bufSize))\n\tn += bufSize\n\treturn\n}\n\n// Encode a slice of int64s ([]int64).\nfunc (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {\n\ts := structPointer_Word64Slice(base, p.field)\n\tl := 
s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\tfor i := 0; i < l; i++ {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\tp.valEnc(o, s.Index(i))\n\t}\n\treturn nil\n}\n\nfunc size_slice_int64(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word64Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tfor i := 0; i < l; i++ {\n\t\tn += len(p.tagcode)\n\t\tn += p.valSize(s.Index(i))\n\t}\n\treturn\n}\n\n// Encode a slice of int64s ([]int64) in packed format.\nfunc (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {\n\ts := structPointer_Word64Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\t// TODO: Reuse a Buffer.\n\tbuf := NewBuffer(nil)\n\tfor i := 0; i < l; i++ {\n\t\tp.valEnc(buf, s.Index(i))\n\t}\n\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeVarint(uint64(len(buf.buf)))\n\to.buf = append(o.buf, buf.buf...)\n\treturn nil\n}\n\nfunc size_slice_packed_int64(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word64Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tvar bufSize int\n\tfor i := 0; i < l; i++ {\n\t\tbufSize += p.valSize(s.Index(i))\n\t}\n\n\tn += len(p.tagcode)\n\tn += sizeVarint(uint64(bufSize))\n\tn += bufSize\n\treturn\n}\n\n// Encode a slice of slice of bytes ([][]byte).\nfunc (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {\n\tss := *structPointer_BytesSlice(base, p.field)\n\tl := len(ss)\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\tfor i := 0; i < l; i++ {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\to.EncodeRawBytes(ss[i])\n\t}\n\treturn nil\n}\n\nfunc size_slice_slice_byte(p *Properties, base structPointer) (n int) {\n\tss := *structPointer_BytesSlice(base, p.field)\n\tl := len(ss)\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tn += l * len(p.tagcode)\n\tfor i := 0; i < l; i++ {\n\t\tn += sizeRawBytes(ss[i])\n\t}\n\treturn\n}\n\n// Encode a slice of strings ([]string).\nfunc (o *Buffer) 
enc_slice_string(p *Properties, base structPointer) error {\n\tss := *structPointer_StringSlice(base, p.field)\n\tl := len(ss)\n\tfor i := 0; i < l; i++ {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\to.EncodeStringBytes(ss[i])\n\t}\n\treturn nil\n}\n\nfunc size_slice_string(p *Properties, base structPointer) (n int) {\n\tss := *structPointer_StringSlice(base, p.field)\n\tl := len(ss)\n\tn += l * len(p.tagcode)\n\tfor i := 0; i < l; i++ {\n\t\tn += sizeStringBytes(ss[i])\n\t}\n\treturn\n}\n\n// Encode a slice of message structs ([]*struct).\nfunc (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {\n\tvar state errorState\n\ts := structPointer_StructPointerSlice(base, p.field)\n\tl := s.Len()\n\n\tfor i := 0; i < l; i++ {\n\t\tstructp := s.Index(i)\n\t\tif structPointer_IsNil(structp) {\n\t\t\treturn errRepeatedHasNil\n\t\t}\n\n\t\t// Can the object marshal itself?\n\t\tif p.isMarshaler {\n\t\t\tm := structPointer_Interface(structp, p.stype).(Marshaler)\n\t\t\tdata, err := m.Marshal()\n\t\t\tif err != nil && !state.shouldContinue(err, nil) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\to.buf = append(o.buf, p.tagcode...)\n\t\t\to.EncodeRawBytes(data)\n\t\t\tcontinue\n\t\t}\n\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\terr := o.enc_len_struct(p.sprop, structp, &state)\n\t\tif err != nil && !state.shouldContinue(err, nil) {\n\t\t\tif err == ErrNil {\n\t\t\t\treturn errRepeatedHasNil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\treturn state.err\n}\n\nfunc size_slice_struct_message(p *Properties, base structPointer) (n int) {\n\ts := structPointer_StructPointerSlice(base, p.field)\n\tl := s.Len()\n\tn += l * len(p.tagcode)\n\tfor i := 0; i < l; i++ {\n\t\tstructp := s.Index(i)\n\t\tif structPointer_IsNil(structp) {\n\t\t\treturn // return the size up to this point\n\t\t}\n\n\t\t// Can the object marshal itself?\n\t\tif p.isMarshaler {\n\t\t\tm := structPointer_Interface(structp, p.stype).(Marshaler)\n\t\t\tdata, _ := m.Marshal()\n\t\t\tn += 
len(p.tagcode)\n\t\t\tn += sizeRawBytes(data)\n\t\t\tcontinue\n\t\t}\n\n\t\tn0 := size_struct(p.sprop, structp)\n\t\tn1 := sizeVarint(uint64(n0)) // size of encoded length\n\t\tn += n0 + n1\n\t}\n\treturn\n}\n\n// Encode a slice of group structs ([]*struct).\nfunc (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {\n\tvar state errorState\n\ts := structPointer_StructPointerSlice(base, p.field)\n\tl := s.Len()\n\n\tfor i := 0; i < l; i++ {\n\t\tb := s.Index(i)\n\t\tif structPointer_IsNil(b) {\n\t\t\treturn errRepeatedHasNil\n\t\t}\n\n\t\to.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))\n\n\t\terr := o.enc_struct(p.sprop, b)\n\n\t\tif err != nil && !state.shouldContinue(err, nil) {\n\t\t\tif err == ErrNil {\n\t\t\t\treturn errRepeatedHasNil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\to.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))\n\t}\n\treturn state.err\n}\n\nfunc size_slice_struct_group(p *Properties, base structPointer) (n int) {\n\ts := structPointer_StructPointerSlice(base, p.field)\n\tl := s.Len()\n\n\tn += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))\n\tn += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))\n\tfor i := 0; i < l; i++ {\n\t\tb := s.Index(i)\n\t\tif structPointer_IsNil(b) {\n\t\t\treturn // return size up to this point\n\t\t}\n\n\t\tn += size_struct(p.sprop, b)\n\t}\n\treturn\n}\n\n// Encode an extension map.\nfunc (o *Buffer) enc_map(p *Properties, base structPointer) error {\n\tv := *structPointer_ExtMap(base, p.field)\n\tif err := encodeExtensionMap(v); err != nil {\n\t\treturn err\n\t}\n\t// Fast-path for common cases: zero or one extensions.\n\tif len(v) <= 1 {\n\t\tfor _, e := range v {\n\t\t\to.buf = append(o.buf, e.enc...)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Sort keys to provide a deterministic encoding.\n\tkeys := make([]int, 0, len(v))\n\tfor k := range v {\n\t\tkeys = append(keys, int(k))\n\t}\n\tsort.Ints(keys)\n\n\tfor _, k := range keys {\n\t\to.buf = append(o.buf, 
v[int32(k)].enc...)\n\t}\n\treturn nil\n}\n\nfunc size_map(p *Properties, base structPointer) int {\n\tv := *structPointer_ExtMap(base, p.field)\n\treturn sizeExtensionMap(v)\n}\n\n// Encode a map field.\nfunc (o *Buffer) enc_new_map(p *Properties, base structPointer) error {\n\tvar state errorState // XXX: or do we need to plumb this through?\n\n\t/*\n\t\tA map defined as\n\t\t\tmap<key_type, value_type> map_field = N;\n\t\tis encoded in the same way as\n\t\t\tmessage MapFieldEntry {\n\t\t\t\tkey_type key = 1;\n\t\t\t\tvalue_type value = 2;\n\t\t\t}\n\t\t\trepeated MapFieldEntry map_field = N;\n\t*/\n\n\tv := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V\n\tif v.Len() == 0 {\n\t\treturn nil\n\t}\n\n\tkeycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)\n\n\tenc := func() error {\n\t\tif err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tkeys := v.MapKeys()\n\tsort.Sort(mapKeys(keys))\n\tfor _, key := range keys {\n\t\tval := v.MapIndex(key)\n\n\t\t// The only illegal map entry values are nil message pointers.\n\t\tif val.Kind() == reflect.Ptr && val.IsNil() {\n\t\t\treturn errors.New(\"proto: map has nil element\")\n\t\t}\n\n\t\tkeycopy.Set(key)\n\t\tvalcopy.Set(val)\n\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\tif err := o.enc_len_thing(enc, &state); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc size_new_map(p *Properties, base structPointer) int {\n\tv := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V\n\n\tkeycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)\n\n\tn := 0\n\tfor _, key := range v.MapKeys() {\n\t\tval := v.MapIndex(key)\n\t\tkeycopy.Set(key)\n\t\tvalcopy.Set(val)\n\n\t\t// Tag codes for key and val are the responsibility of the sub-sizer.\n\t\tkeysize := p.mkeyprop.size(p.mkeyprop, keybase)\n\t\tvalsize := 
p.mvalprop.size(p.mvalprop, valbase)\n\t\tentry := keysize + valsize\n\t\t// Add on tag code and length of map entry itself.\n\t\tn += len(p.tagcode) + sizeVarint(uint64(entry)) + entry\n\t}\n\treturn n\n}\n\n// mapEncodeScratch returns a new reflect.Value matching the map's value type,\n// and a structPointer suitable for passing to an encoder or sizer.\nfunc mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {\n\t// Prepare addressable doubly-indirect placeholders for the key and value types.\n\t// This is needed because the element-type encoders expect **T, but the map iteration produces T.\n\n\tkeycopy = reflect.New(mapType.Key()).Elem()                 // addressable K\n\tkeyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K\n\tkeyptr.Set(keycopy.Addr())                                  //\n\tkeybase = toStructPointer(keyptr.Addr())                    // **K\n\n\t// Value types are more varied and require special handling.\n\tswitch mapType.Elem().Kind() {\n\tcase reflect.Slice:\n\t\t// []byte\n\t\tvar dummy []byte\n\t\tvalcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte\n\t\tvalbase = toStructPointer(valcopy.Addr())\n\tcase reflect.Ptr:\n\t\t// message; the generated field type is map[K]*Msg (so V is *Msg),\n\t\t// so we only need one level of indirection.\n\t\tvalcopy = reflect.New(mapType.Elem()).Elem() // addressable V\n\t\tvalbase = toStructPointer(valcopy.Addr())\n\tdefault:\n\t\t// everything else\n\t\tvalcopy = reflect.New(mapType.Elem()).Elem()                // addressable V\n\t\tvalptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V\n\t\tvalptr.Set(valcopy.Addr())                                  //\n\t\tvalbase = toStructPointer(valptr.Addr())                    // **V\n\t}\n\treturn\n}\n\n// Encode a struct.\nfunc (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {\n\tvar state errorState\n\t// Encode fields in tag 
order so that decoders may use optimizations\n\t// that depend on the ordering.\n\t// https://developers.google.com/protocol-buffers/docs/encoding#order\n\tfor _, i := range prop.order {\n\t\tp := prop.Prop[i]\n\t\tif p.enc != nil {\n\t\t\terr := p.enc(o, p, base)\n\t\t\tif err != nil {\n\t\t\t\tif err == ErrNil {\n\t\t\t\t\tif p.Required && state.err == nil {\n\t\t\t\t\t\tstate.err = &RequiredNotSetError{p.Name}\n\t\t\t\t\t}\n\t\t\t\t} else if err == errRepeatedHasNil {\n\t\t\t\t\t// Give more context to nil values in repeated fields.\n\t\t\t\t\treturn errors.New(\"repeated field \" + p.OrigName + \" has nil element\")\n\t\t\t\t} else if !state.shouldContinue(err, p) {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Add unrecognized fields at the end.\n\tif prop.unrecField.IsValid() {\n\t\tv := *structPointer_Bytes(base, prop.unrecField)\n\t\tif len(v) > 0 {\n\t\t\to.buf = append(o.buf, v...)\n\t\t}\n\t}\n\n\treturn state.err\n}\n\nfunc size_struct(prop *StructProperties, base structPointer) (n int) {\n\tfor _, i := range prop.order {\n\t\tp := prop.Prop[i]\n\t\tif p.size != nil {\n\t\t\tn += p.size(p, base)\n\t\t}\n\t}\n\n\t// Add unrecognized fields at the end.\n\tif prop.unrecField.IsValid() {\n\t\tv := *structPointer_Bytes(base, prop.unrecField)\n\t\tn += len(v)\n\t}\n\n\treturn\n}\n\nvar zeroes [20]byte // longer than any conceivable sizeVarint\n\n// Encode a struct, preceded by its encoded length (as a varint).\nfunc (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {\n\treturn o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)\n}\n\n// Encode something, preceded by its encoded length (as a varint).\nfunc (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {\n\tiLen := len(o.buf)\n\to.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length\n\tiMsg := len(o.buf)\n\terr := enc()\n\tif err != nil && !state.shouldContinue(err, nil) {\n\t\treturn 
err\n\t}\n\tlMsg := len(o.buf) - iMsg\n\tlLen := sizeVarint(uint64(lMsg))\n\tswitch x := lLen - (iMsg - iLen); {\n\tcase x > 0: // actual length is x bytes larger than the space we reserved\n\t\t// Move msg x bytes right.\n\t\to.buf = append(o.buf, zeroes[:x]...)\n\t\tcopy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])\n\tcase x < 0: // actual length is x bytes smaller than the space we reserved\n\t\t// Move msg x bytes left.\n\t\tcopy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])\n\t\to.buf = o.buf[:len(o.buf)+x] // x is negative\n\t}\n\t// Encode the length in the reserved space.\n\to.buf = o.buf[:iLen]\n\to.EncodeVarint(uint64(lMsg))\n\to.buf = o.buf[:len(o.buf)+lMsg]\n\treturn state.err\n}\n\n// errorState maintains the first error that occurs and updates that error\n// with additional context.\ntype errorState struct {\n\terr error\n}\n\n// shouldContinue reports whether encoding should continue upon encountering the\n// given error. If the error is RequiredNotSetError, shouldContinue returns true\n// and, if this is the first appearance of that error, remembers it for future\n// reporting.\n//\n// If prop is not nil, it may update any error with additional context about the\n// field with the error.\nfunc (s *errorState) shouldContinue(err error, prop *Properties) bool {\n\t// Ignore unset required fields.\n\treqNotSet, ok := err.(*RequiredNotSetError)\n\tif !ok {\n\t\treturn false\n\t}\n\tif s.err == nil {\n\t\tif prop != nil {\n\t\t\terr = &RequiredNotSetError{prop.Name + \".\" + reqNotSet.field}\n\t\t}\n\t\ts.err = err\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/equal.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2011 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Protocol buffer comparison.\n// TODO: MessageSet.\n\npackage proto\n\nimport (\n\t\"bytes\"\n\t\"log\"\n\t\"reflect\"\n\t\"strings\"\n)\n\n/*\nEqual returns true iff protocol buffers a and b are equal.\nThe arguments must both be pointers to protocol buffer structs.\n\nEquality is defined in this way:\n  - Two messages are equal iff they are the same type,\n    corresponding fields are equal, unknown field sets\n    are equal, and extensions sets are equal.\n  - Two set scalar fields are equal iff their values are equal.\n    If the fields are of a floating-point type, remember that\n    NaN != x for all x, including NaN.\n  - Two repeated fields are equal iff their lengths are the same,\n    and their corresponding elements are equal (a \"bytes\" field,\n    although represented by []byte, is not a repeated field)\n  - Two unset fields are equal.\n  - Two unknown field sets are equal if their current\n    encoded state is equal.\n  - Two extension sets are equal iff they have corresponding\n    elements that are pairwise equal.\n  - Every other combination of things are not equal.\n\nThe return value is undefined if a and b are not protocol buffers.\n*/\nfunc Equal(a, b Message) bool {\n\tif a == nil || b == nil {\n\t\treturn a == b\n\t}\n\tv1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)\n\tif v1.Type() != v2.Type() {\n\t\treturn false\n\t}\n\tif v1.Kind() == reflect.Ptr {\n\t\tif v1.IsNil() {\n\t\t\treturn 
v2.IsNil()\n\t\t}\n\t\tif v2.IsNil() {\n\t\t\treturn false\n\t\t}\n\t\tv1, v2 = v1.Elem(), v2.Elem()\n\t}\n\tif v1.Kind() != reflect.Struct {\n\t\treturn false\n\t}\n\treturn equalStruct(v1, v2)\n}\n\n// v1 and v2 are known to have the same type.\nfunc equalStruct(v1, v2 reflect.Value) bool {\n\tfor i := 0; i < v1.NumField(); i++ {\n\t\tf := v1.Type().Field(i)\n\t\tif strings.HasPrefix(f.Name, \"XXX_\") {\n\t\t\tcontinue\n\t\t}\n\t\tf1, f2 := v1.Field(i), v2.Field(i)\n\t\tif f.Type.Kind() == reflect.Ptr {\n\t\t\tif n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {\n\t\t\t\t// both unset\n\t\t\t\tcontinue\n\t\t\t} else if n1 != n2 {\n\t\t\t\t// set/unset mismatch\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tb1, ok := f1.Interface().(raw)\n\t\t\tif ok {\n\t\t\t\tb2 := f2.Interface().(raw)\n\t\t\t\t// RawMessage\n\t\t\t\tif !bytes.Equal(b1.Bytes(), b2.Bytes()) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tf1, f2 = f1.Elem(), f2.Elem()\n\t\t}\n\t\tif !equalAny(f1, f2) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif em1 := v1.FieldByName(\"XXX_extensions\"); em1.IsValid() {\n\t\tem2 := v2.FieldByName(\"XXX_extensions\")\n\t\tif !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tuf := v1.FieldByName(\"XXX_unrecognized\")\n\tif !uf.IsValid() {\n\t\treturn true\n\t}\n\n\tu1 := uf.Bytes()\n\tu2 := v2.FieldByName(\"XXX_unrecognized\").Bytes()\n\tif !bytes.Equal(u1, u2) {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n// v1 and v2 are known to have the same type.\nfunc equalAny(v1, v2 reflect.Value) bool {\n\tif v1.Type() == protoMessageType {\n\t\tm1, _ := v1.Interface().(Message)\n\t\tm2, _ := v2.Interface().(Message)\n\t\treturn Equal(m1, m2)\n\t}\n\tswitch v1.Kind() {\n\tcase reflect.Bool:\n\t\treturn v1.Bool() == v2.Bool()\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn v1.Float() == v2.Float()\n\tcase reflect.Int32, reflect.Int64:\n\t\treturn v1.Int() == 
v2.Int()\n\tcase reflect.Map:\n\t\tif v1.Len() != v2.Len() {\n\t\t\treturn false\n\t\t}\n\t\tfor _, key := range v1.MapKeys() {\n\t\t\tval2 := v2.MapIndex(key)\n\t\t\tif !val2.IsValid() {\n\t\t\t\t// This key was not found in the second map.\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif !equalAny(v1.MapIndex(key), val2) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase reflect.Ptr:\n\t\treturn equalAny(v1.Elem(), v2.Elem())\n\tcase reflect.Slice:\n\t\tif v1.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\t// short circuit: []byte\n\t\t\tif v1.IsNil() != v2.IsNil() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))\n\t\t}\n\n\t\tif v1.Len() != v2.Len() {\n\t\t\treturn false\n\t\t}\n\t\tfor i := 0; i < v1.Len(); i++ {\n\t\t\tif !equalAny(v1.Index(i), v2.Index(i)) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase reflect.String:\n\t\treturn v1.Interface().(string) == v2.Interface().(string)\n\tcase reflect.Struct:\n\t\treturn equalStruct(v1, v2)\n\tcase reflect.Uint32, reflect.Uint64:\n\t\treturn v1.Uint() == v2.Uint()\n\t}\n\n\t// unknown type, so not a protocol buffer\n\tlog.Printf(\"proto: don't know how to compare %v\", v1)\n\treturn false\n}\n\n// base is the struct type that the extensions are based on.\n// em1 and em2 are extension maps.\nfunc equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {\n\tif len(em1) != len(em2) {\n\t\treturn false\n\t}\n\n\tfor extNum, e1 := range em1 {\n\t\te2, ok := em2[extNum]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\tm1, m2 := e1.value, e2.value\n\n\t\tif m1 != nil && m2 != nil {\n\t\t\t// Both are unencoded.\n\t\t\tif !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// At least one is encoded. 
To do a semantically correct comparison\n\t\t// we need to unmarshal them first.\n\t\tvar desc *ExtensionDesc\n\t\tif m := extensionMaps[base]; m != nil {\n\t\t\tdesc = m[extNum]\n\t\t}\n\t\tif desc == nil {\n\t\t\tlog.Printf(\"proto: don't know how to compare extension %d of %v\", extNum, base)\n\t\t\tcontinue\n\t\t}\n\t\tvar err error\n\t\tif m1 == nil {\n\t\t\tm1, err = decodeExtension(e1.enc, desc)\n\t\t}\n\t\tif m2 == nil && err == nil {\n\t\t\tm2, err = decodeExtension(e2.enc, desc)\n\t\t}\n\t\tif err != nil {\n\t\t\t// The encoded form is invalid.\n\t\t\tlog.Printf(\"proto: badly encoded extension %d of %v: %v\", extNum, base, err)\n\t\t\treturn false\n\t\t}\n\t\tif !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/extensions.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n/*\n * Types and routines for supporting protocol buffer extensions.\n */\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"sync\"\n)\n\n// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.\nvar ErrMissingExtension = errors.New(\"proto: missing extension\")\n\n// ExtensionRange represents a range of message extensions for a protocol buffer.\n// Used in code generated by the protocol compiler.\ntype ExtensionRange struct {\n\tStart, End int32 // both inclusive\n}\n\n// extendableProto is an interface implemented by any protocol buffer that may be extended.\ntype extendableProto interface {\n\tMessage\n\tExtensionRangeArray() []ExtensionRange\n\tExtensionMap() map[int32]Extension\n}\n\nvar extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()\n\n// ExtensionDesc represents an extension specification.\n// Used in generated code from the protocol compiler.\ntype ExtensionDesc struct {\n\tExtendedType  Message     // nil pointer to the type that is being extended\n\tExtensionType interface{} // nil pointer to the extension type\n\tField         int32       // field number\n\tName          string      // fully-qualified name of extension, for text formatting\n\tTag           string      // protobuf tag style\n}\n\nfunc (ed *ExtensionDesc) repeated() bool {\n\tt := reflect.TypeOf(ed.ExtensionType)\n\treturn t.Kind() == 
reflect.Slice && t.Elem().Kind() != reflect.Uint8\n}\n\n// Extension represents an extension in a message.\ntype Extension struct {\n\t// When an extension is stored in a message using SetExtension\n\t// only desc and value are set. When the message is marshaled\n\t// enc will be set to the encoded form of the message.\n\t//\n\t// When a message is unmarshaled and contains extensions, each\n\t// extension will have only enc set. When such an extension is\n\t// accessed using GetExtension (or GetExtensions) desc and value\n\t// will be set.\n\tdesc  *ExtensionDesc\n\tvalue interface{}\n\tenc   []byte\n}\n\n// SetRawExtension is for testing only.\nfunc SetRawExtension(base extendableProto, id int32, b []byte) {\n\tbase.ExtensionMap()[id] = Extension{enc: b}\n}\n\n// isExtensionField returns true iff the given field number is in an extension range.\nfunc isExtensionField(pb extendableProto, field int32) bool {\n\tfor _, er := range pb.ExtensionRangeArray() {\n\t\tif er.Start <= field && field <= er.End {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// checkExtensionTypes checks that the given extension is valid for pb.\nfunc checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {\n\t// Check the extended type.\n\tif a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {\n\t\treturn errors.New(\"proto: bad extended type; \" + b.String() + \" does not extend \" + a.String())\n\t}\n\t// Check the range.\n\tif !isExtensionField(pb, extension.Field) {\n\t\treturn errors.New(\"proto: bad extension number; not in declared ranges\")\n\t}\n\treturn nil\n}\n\n// extPropKey is sufficient to uniquely identify an extension.\ntype extPropKey struct {\n\tbase  reflect.Type\n\tfield int32\n}\n\nvar extProp = struct {\n\tsync.RWMutex\n\tm map[extPropKey]*Properties\n}{\n\tm: make(map[extPropKey]*Properties),\n}\n\nfunc extensionProperties(ed *ExtensionDesc) *Properties {\n\tkey := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: 
ed.Field}\n\n\textProp.RLock()\n\tif prop, ok := extProp.m[key]; ok {\n\t\textProp.RUnlock()\n\t\treturn prop\n\t}\n\textProp.RUnlock()\n\n\textProp.Lock()\n\tdefer extProp.Unlock()\n\t// Check again.\n\tif prop, ok := extProp.m[key]; ok {\n\t\treturn prop\n\t}\n\n\tprop := new(Properties)\n\tprop.Init(reflect.TypeOf(ed.ExtensionType), \"unknown_name\", ed.Tag, nil)\n\textProp.m[key] = prop\n\treturn prop\n}\n\n// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.\nfunc encodeExtensionMap(m map[int32]Extension) error {\n\tfor k, e := range m {\n\t\tif e.value == nil || e.desc == nil {\n\t\t\t// Extension is only in its encoded form.\n\t\t\tcontinue\n\t\t}\n\n\t\t// We don't skip extensions that have an encoded form set,\n\t\t// because the extension value may have been mutated after\n\t\t// the last time this function was called.\n\n\t\tet := reflect.TypeOf(e.desc.ExtensionType)\n\t\tprops := extensionProperties(e.desc)\n\n\t\tp := NewBuffer(nil)\n\t\t// If e.value has type T, the encoder expects a *struct{ X T }.\n\t\t// Pass a *T with a zero field and hope it all works out.\n\t\tx := reflect.New(et)\n\t\tx.Elem().Set(reflect.ValueOf(e.value))\n\t\tif err := props.enc(p, props, toStructPointer(x)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\te.enc = p.buf\n\t\tm[k] = e\n\t}\n\treturn nil\n}\n\nfunc sizeExtensionMap(m map[int32]Extension) (n int) {\n\tfor _, e := range m {\n\t\tif e.value == nil || e.desc == nil {\n\t\t\t// Extension is only in its encoded form.\n\t\t\tn += len(e.enc)\n\t\t\tcontinue\n\t\t}\n\n\t\t// We don't skip extensions that have an encoded form set,\n\t\t// because the extension value may have been mutated after\n\t\t// the last time this function was called.\n\n\t\tet := reflect.TypeOf(e.desc.ExtensionType)\n\t\tprops := extensionProperties(e.desc)\n\n\t\t// If e.value has type T, the encoder expects a *struct{ X T }.\n\t\t// Pass a *T with a zero field and hope it all works out.\n\t\tx := 
reflect.New(et)\n\t\tx.Elem().Set(reflect.ValueOf(e.value))\n\t\tn += props.size(props, toStructPointer(x))\n\t}\n\treturn\n}\n\n// HasExtension returns whether the given extension is present in pb.\nfunc HasExtension(pb extendableProto, extension *ExtensionDesc) bool {\n\t// TODO: Check types, field numbers, etc.?\n\t_, ok := pb.ExtensionMap()[extension.Field]\n\treturn ok\n}\n\n// ClearExtension removes the given extension from pb.\nfunc ClearExtension(pb extendableProto, extension *ExtensionDesc) {\n\t// TODO: Check types, field numbers, etc.?\n\tdelete(pb.ExtensionMap(), extension.Field)\n}\n\n// GetExtension parses and returns the given extension of pb.\n// If the extension is not present and has no default value it returns ErrMissingExtension.\nfunc GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {\n\tif err := checkExtensionTypes(pb, extension); err != nil {\n\t\treturn nil, err\n\t}\n\n\temap := pb.ExtensionMap()\n\te, ok := emap[extension.Field]\n\tif !ok {\n\t\t// defaultExtensionValue returns the default value or\n\t\t// ErrMissingExtension if there is no default.\n\t\treturn defaultExtensionValue(extension)\n\t}\n\n\tif e.value != nil {\n\t\t// Already decoded. Check the descriptor, though.\n\t\tif e.desc != extension {\n\t\t\t// This shouldn't happen. 
If it does, it means that\n\t\t\t// GetExtension was called twice with two different\n\t\t\t// descriptors with the same field number.\n\t\t\treturn nil, errors.New(\"proto: descriptor conflict\")\n\t\t}\n\t\treturn e.value, nil\n\t}\n\n\tv, err := decodeExtension(e.enc, extension)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Remember the decoded version and drop the encoded version.\n\t// That way it is safe to mutate what we return.\n\te.value = v\n\te.desc = extension\n\te.enc = nil\n\temap[extension.Field] = e\n\treturn e.value, nil\n}\n\n// defaultExtensionValue returns the default value for extension.\n// If no default for an extension is defined ErrMissingExtension is returned.\nfunc defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {\n\tt := reflect.TypeOf(extension.ExtensionType)\n\tprops := extensionProperties(extension)\n\n\tsf, _, err := fieldDefault(t, props)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif sf == nil || sf.value == nil {\n\t\t// There is no default value.\n\t\treturn nil, ErrMissingExtension\n\t}\n\n\tif t.Kind() != reflect.Ptr {\n\t\t// We do not need to return a Ptr, we can directly return sf.value.\n\t\treturn sf.value, nil\n\t}\n\n\t// We need to return an interface{} that is a pointer to sf.value.\n\tvalue := reflect.New(t).Elem()\n\tvalue.Set(reflect.New(value.Type().Elem()))\n\tif sf.kind == reflect.Int32 {\n\t\t// We may have an int32 or an enum, but the underlying data is int32.\n\t\t// Since we can't set an int32 into a non int32 reflect.value directly\n\t\t// set it as a int32.\n\t\tvalue.Elem().SetInt(int64(sf.value.(int32)))\n\t} else {\n\t\tvalue.Elem().Set(reflect.ValueOf(sf.value))\n\t}\n\treturn value.Interface(), nil\n}\n\n// decodeExtension decodes an extension encoded in b.\nfunc decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {\n\to := NewBuffer(b)\n\n\tt := reflect.TypeOf(extension.ExtensionType)\n\trep := extension.repeated()\n\n\tprops := 
extensionProperties(extension)\n\n\t// t is a pointer to a struct, pointer to basic type or a slice.\n\t// Allocate a \"field\" to store the pointer/slice itself; the\n\t// pointer/slice will be stored here. We pass\n\t// the address of this field to props.dec.\n\t// This passes a zero field and a *t and lets props.dec\n\t// interpret it as a *struct{ x t }.\n\tvalue := reflect.New(t).Elem()\n\n\tfor {\n\t\t// Discard wire type and field number varint. It isn't needed.\n\t\tif _, err := o.DecodeVarint(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif !rep || o.index >= len(o.buf) {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn value.Interface(), nil\n}\n\n// GetExtensions returns a slice of the extensions present in pb that are also listed in es.\n// The returned slice has the same length as es; missing extensions will appear as nil elements.\nfunc GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {\n\tepb, ok := pb.(extendableProto)\n\tif !ok {\n\t\terr = errors.New(\"proto: not an extendable proto\")\n\t\treturn\n\t}\n\textensions = make([]interface{}, len(es))\n\tfor i, e := range es {\n\t\textensions[i], err = GetExtension(epb, e)\n\t\tif err == ErrMissingExtension {\n\t\t\terr = nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n// SetExtension sets the specified extension of pb to the specified value.\nfunc SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {\n\tif err := checkExtensionTypes(pb, extension); err != nil {\n\t\treturn err\n\t}\n\ttyp := reflect.TypeOf(extension.ExtensionType)\n\tif typ != reflect.TypeOf(value) {\n\t\treturn errors.New(\"proto: bad extension value type\")\n\t}\n\t// nil extension values need to be caught early, because the\n\t// encoder can't distinguish an ErrNil due to a nil extension\n\t// from an ErrNil due to a missing field. 
Extensions are\n\t// always optional, so the encoder would just swallow the error\n\t// and drop all the extensions from the encoded message.\n\tif reflect.ValueOf(value).IsNil() {\n\t\treturn fmt.Errorf(\"proto: SetExtension called with nil value of type %T\", value)\n\t}\n\n\tpb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}\n\treturn nil\n}\n\n// A global registry of extensions.\n// The generated code will register the generated descriptors by calling RegisterExtension.\n\nvar extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)\n\n// RegisterExtension is called from the generated code.\nfunc RegisterExtension(desc *ExtensionDesc) {\n\tst := reflect.TypeOf(desc.ExtendedType).Elem()\n\tm := extensionMaps[st]\n\tif m == nil {\n\t\tm = make(map[int32]*ExtensionDesc)\n\t\textensionMaps[st] = m\n\t}\n\tif _, ok := m[desc.Field]; ok {\n\t\tpanic(\"proto: duplicate extension registered: \" + st.String() + \" \" + strconv.Itoa(int(desc.Field)))\n\t}\n\tm[desc.Field] = desc\n}\n\n// RegisteredExtensions returns a map of the registered extensions of a\n// protocol buffer struct, indexed by the extension number.\n// The argument pb should be a nil pointer to the struct type.\nfunc RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {\n\treturn extensionMaps[reflect.TypeOf(pb).Elem()]\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/lib.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n/*\nPackage proto converts data structures to and from the wire format of\nprotocol buffers.  
It works in concert with the Go source code generated\nfor .proto files by the protocol compiler.\n\nA summary of the properties of the protocol buffer interface\nfor a protocol buffer variable v:\n\n  - Names are turned from camel_case to CamelCase for export.\n  - There are no methods on v to set fields; just treat\n\tthem as structure fields.\n  - There are getters that return a field's value if set,\n\tand return the field's default value if unset.\n\tThe getters work even if the receiver is a nil message.\n  - The zero value for a struct is its correct initialization state.\n\tAll desired fields must be set before marshaling.\n  - A Reset() method will restore a protobuf struct to its zero state.\n  - Non-repeated fields are pointers to the values; nil means unset.\n\tThat is, optional or required field int32 f becomes F *int32.\n  - Repeated fields are slices.\n  - Helper functions are available to aid the setting of fields.\n\tmsg.Foo = proto.String(\"hello\") // set field\n  - Constants are defined to hold the default values of all fields that\n\thave them.  They have the form Default_StructName_FieldName.\n\tBecause the getter methods handle defaulted values,\n\tdirect use of these constants should be rare.\n  - Enums are given type names and maps from names to values.\n\tEnum values are prefixed by the enclosing message's name, or by the\n\tenum's type name if it is a top-level enum. Enum types have a String\n\tmethod, and a Enum method to assist in message construction.\n  - Nested messages, groups and enums have type names prefixed with the name of\n\tthe surrounding message type.\n  - Extensions are given descriptor names that start with E_,\n\tfollowed by an underscore-delimited list of the nested messages\n\tthat contain it (if any) followed by the CamelCased name of the\n\textension field itself.  
HasExtension, ClearExtension, GetExtension\n\tand SetExtension are functions for manipulating extensions.\n  - Marshal and Unmarshal are functions to encode and decode the wire format.\n\nThe simplest way to describe this is to see an example.\nGiven file test.proto, containing\n\n\tpackage example;\n\n\tenum FOO { X = 17; }\n\n\tmessage Test {\n\t  required string label = 1;\n\t  optional int32 type = 2 [default=77];\n\t  repeated int64 reps = 3;\n\t  optional group OptionalGroup = 4 {\n\t    required string RequiredField = 5;\n\t  }\n\t}\n\nThe resulting file, test.pb.go, is:\n\n\tpackage example\n\n\timport proto \"github.com/golang/protobuf/proto\"\n\timport math \"math\"\n\n\ttype FOO int32\n\tconst (\n\t\tFOO_X FOO = 17\n\t)\n\tvar FOO_name = map[int32]string{\n\t\t17: \"X\",\n\t}\n\tvar FOO_value = map[string]int32{\n\t\t\"X\": 17,\n\t}\n\n\tfunc (x FOO) Enum() *FOO {\n\t\tp := new(FOO)\n\t\t*p = x\n\t\treturn p\n\t}\n\tfunc (x FOO) String() string {\n\t\treturn proto.EnumName(FOO_name, int32(x))\n\t}\n\tfunc (x *FOO) UnmarshalJSON(data []byte) error {\n\t\tvalue, err := proto.UnmarshalJSONEnum(FOO_value, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*x = FOO(value)\n\t\treturn nil\n\t}\n\n\ttype Test struct {\n\t\tLabel            *string             `protobuf:\"bytes,1,req,name=label\" json:\"label,omitempty\"`\n\t\tType             *int32              `protobuf:\"varint,2,opt,name=type,def=77\" json:\"type,omitempty\"`\n\t\tReps             []int64             `protobuf:\"varint,3,rep,name=reps\" json:\"reps,omitempty\"`\n\t\tOptionalgroup    *Test_OptionalGroup `protobuf:\"group,4,opt,name=OptionalGroup\" json:\"optionalgroup,omitempty\"`\n\t\tXXX_unrecognized []byte              `json:\"-\"`\n\t}\n\tfunc (m *Test) Reset()         { *m = Test{} }\n\tfunc (m *Test) String() string { return proto.CompactTextString(m) }\n\tfunc (*Test) ProtoMessage()    {}\n\tconst Default_Test_Type int32 = 77\n\n\tfunc (m *Test) GetLabel() string {\n\t\tif m != 
nil && m.Label != nil {\n\t\t\treturn *m.Label\n\t\t}\n\t\treturn \"\"\n\t}\n\n\tfunc (m *Test) GetType() int32 {\n\t\tif m != nil && m.Type != nil {\n\t\t\treturn *m.Type\n\t\t}\n\t\treturn Default_Test_Type\n\t}\n\n\tfunc (m *Test) GetOptionalgroup() *Test_OptionalGroup {\n\t\tif m != nil {\n\t\t\treturn m.Optionalgroup\n\t\t}\n\t\treturn nil\n\t}\n\n\ttype Test_OptionalGroup struct {\n\t\tRequiredField *string `protobuf:\"bytes,5,req\" json:\"RequiredField,omitempty\"`\n\t}\n\tfunc (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }\n\tfunc (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }\n\n\tfunc (m *Test_OptionalGroup) GetRequiredField() string {\n\t\tif m != nil && m.RequiredField != nil {\n\t\t\treturn *m.RequiredField\n\t\t}\n\t\treturn \"\"\n\t}\n\n\tfunc init() {\n\t\tproto.RegisterEnum(\"example.FOO\", FOO_name, FOO_value)\n\t}\n\nTo create and play with a Test object:\n\npackage main\n\n\timport (\n\t\t\"log\"\n\n\t\t\"github.com/golang/protobuf/proto\"\n\t\tpb \"./example.pb\"\n\t)\n\n\tfunc main() {\n\t\ttest := &pb.Test{\n\t\t\tLabel: proto.String(\"hello\"),\n\t\t\tType:  proto.Int32(17),\n\t\t\tOptionalgroup: &pb.Test_OptionalGroup{\n\t\t\t\tRequiredField: proto.String(\"good bye\"),\n\t\t\t},\n\t\t}\n\t\tdata, err := proto.Marshal(test)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"marshaling error: \", err)\n\t\t}\n\t\tnewTest := &pb.Test{}\n\t\terr = proto.Unmarshal(data, newTest)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"unmarshaling error: \", err)\n\t\t}\n\t\t// Now test and newTest contain the same data.\n\t\tif test.GetLabel() != newTest.GetLabel() {\n\t\t\tlog.Fatalf(\"data mismatch %q != %q\", test.GetLabel(), newTest.GetLabel())\n\t\t}\n\t\t// etc.\n\t}\n*/\npackage proto\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"sync\"\n)\n\n// Message is implemented by generated protocol buffer messages.\ntype Message interface {\n\tReset()\n\tString() 
string\n\tProtoMessage()\n}\n\n// Stats records allocation details about the protocol buffer encoders\n// and decoders.  Useful for tuning the library itself.\ntype Stats struct {\n\tEmalloc uint64 // mallocs in encode\n\tDmalloc uint64 // mallocs in decode\n\tEncode  uint64 // number of encodes\n\tDecode  uint64 // number of decodes\n\tChit    uint64 // number of cache hits\n\tCmiss   uint64 // number of cache misses\n\tSize    uint64 // number of sizes\n}\n\n// Set to true to enable stats collection.\nconst collectStats = false\n\nvar stats Stats\n\n// GetStats returns a copy of the global Stats structure.\nfunc GetStats() Stats { return stats }\n\n// A Buffer is a buffer manager for marshaling and unmarshaling\n// protocol buffers.  It may be reused between invocations to\n// reduce memory usage.  It is not necessary to use a Buffer;\n// the global functions Marshal and Unmarshal create a\n// temporary Buffer and are fine for most applications.\ntype Buffer struct {\n\tbuf   []byte // encode/decode byte stream\n\tindex int    // write point\n\n\t// pools of basic types to amortize allocation.\n\tbools   []bool\n\tuint32s []uint32\n\tuint64s []uint64\n\n\t// extra pools, only used with pointer_reflect.go\n\tint32s   []int32\n\tint64s   []int64\n\tfloat32s []float32\n\tfloat64s []float64\n}\n\n// NewBuffer allocates a new Buffer and initializes its internal data to\n// the contents of the argument slice.\nfunc NewBuffer(e []byte) *Buffer {\n\treturn &Buffer{buf: e}\n}\n\n// Reset resets the Buffer, ready for marshaling a new protocol buffer.\nfunc (p *Buffer) Reset() {\n\tp.buf = p.buf[0:0] // for reading/writing\n\tp.index = 0        // for reading\n}\n\n// SetBuf replaces the internal buffer with the slice,\n// ready for unmarshaling the contents of the slice.\nfunc (p *Buffer) SetBuf(s []byte) {\n\tp.buf = s\n\tp.index = 0\n}\n\n// Bytes returns the contents of the Buffer.\nfunc (p *Buffer) Bytes() []byte { return p.buf }\n\n/*\n * Helper routines for 
simplifying the creation of optional fields of basic type.\n */\n\n// Bool is a helper routine that allocates a new bool value\n// to store v and returns a pointer to it.\nfunc Bool(v bool) *bool {\n\treturn &v\n}\n\n// Int32 is a helper routine that allocates a new int32 value\n// to store v and returns a pointer to it.\nfunc Int32(v int32) *int32 {\n\treturn &v\n}\n\n// Int is a helper routine that allocates a new int32 value\n// to store v and returns a pointer to it, but unlike Int32\n// its argument value is an int.\nfunc Int(v int) *int32 {\n\tp := new(int32)\n\t*p = int32(v)\n\treturn p\n}\n\n// Int64 is a helper routine that allocates a new int64 value\n// to store v and returns a pointer to it.\nfunc Int64(v int64) *int64 {\n\treturn &v\n}\n\n// Float32 is a helper routine that allocates a new float32 value\n// to store v and returns a pointer to it.\nfunc Float32(v float32) *float32 {\n\treturn &v\n}\n\n// Float64 is a helper routine that allocates a new float64 value\n// to store v and returns a pointer to it.\nfunc Float64(v float64) *float64 {\n\treturn &v\n}\n\n// Uint32 is a helper routine that allocates a new uint32 value\n// to store v and returns a pointer to it.\nfunc Uint32(v uint32) *uint32 {\n\treturn &v\n}\n\n// Uint64 is a helper routine that allocates a new uint64 value\n// to store v and returns a pointer to it.\nfunc Uint64(v uint64) *uint64 {\n\treturn &v\n}\n\n// String is a helper routine that allocates a new string value\n// to store v and returns a pointer to it.\nfunc String(v string) *string {\n\treturn &v\n}\n\n// EnumName is a helper function to simplify printing protocol buffer enums\n// by name.  Given an enum map and a value, it returns a useful string.\nfunc EnumName(m map[int32]string, v int32) string {\n\ts, ok := m[v]\n\tif ok {\n\t\treturn s\n\t}\n\treturn strconv.Itoa(int(v))\n}\n\n// UnmarshalJSONEnum is a helper function to simplify recovering enum int values\n// from their JSON-encoded representation. 
Given a map from the enum's symbolic\n// names to its int values, and a byte buffer containing the JSON-encoded\n// value, it returns an int32 that can be cast to the enum type by the caller.\n//\n// The function can deal with both JSON representations, numeric and symbolic.\nfunc UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {\n\tif data[0] == '\"' {\n\t\t// New style: enums are strings.\n\t\tvar repr string\n\t\tif err := json.Unmarshal(data, &repr); err != nil {\n\t\t\treturn -1, err\n\t\t}\n\t\tval, ok := m[repr]\n\t\tif !ok {\n\t\t\treturn 0, fmt.Errorf(\"unrecognized enum %s value %q\", enumName, repr)\n\t\t}\n\t\treturn val, nil\n\t}\n\t// Old style: enums are ints.\n\tvar val int32\n\tif err := json.Unmarshal(data, &val); err != nil {\n\t\treturn 0, fmt.Errorf(\"cannot unmarshal %#q into enum %s\", data, enumName)\n\t}\n\treturn val, nil\n}\n\n// DebugPrint dumps the encoded data in b in a debugging format with a header\n// including the string s. 
Used in testing but made available for general debugging.\nfunc (p *Buffer) DebugPrint(s string, b []byte) {\n\tvar u uint64\n\n\tobuf := p.buf\n\tindex := p.index\n\tp.buf = b\n\tp.index = 0\n\tdepth := 0\n\n\tfmt.Printf(\"\\n--- %s ---\\n\", s)\n\nout:\n\tfor {\n\t\tfor i := 0; i < depth; i++ {\n\t\t\tfmt.Print(\"  \")\n\t\t}\n\n\t\tindex := p.index\n\t\tif index == len(p.buf) {\n\t\t\tbreak\n\t\t}\n\n\t\top, err := p.DecodeVarint()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%3d: fetching op err %v\\n\", index, err)\n\t\t\tbreak out\n\t\t}\n\t\ttag := op >> 3\n\t\twire := op & 7\n\n\t\tswitch wire {\n\t\tdefault:\n\t\t\tfmt.Printf(\"%3d: t=%3d unknown wire=%d\\n\",\n\t\t\t\tindex, tag, wire)\n\t\t\tbreak out\n\n\t\tcase WireBytes:\n\t\t\tvar r []byte\n\n\t\t\tr, err = p.DecodeRawBytes(false)\n\t\t\tif err != nil {\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d bytes [%d]\", index, tag, len(r))\n\t\t\tif len(r) <= 6 {\n\t\t\t\tfor i := 0; i < len(r); i++ {\n\t\t\t\t\tfmt.Printf(\" %.2x\", r[i])\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor i := 0; i < 3; i++ {\n\t\t\t\t\tfmt.Printf(\" %.2x\", r[i])\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\" ..\")\n\t\t\t\tfor i := len(r) - 3; i < len(r); i++ {\n\t\t\t\t\tfmt.Printf(\" %.2x\", r[i])\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"\\n\")\n\n\t\tcase WireFixed32:\n\t\t\tu, err = p.DecodeFixed32()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"%3d: t=%3d fix32 err %v\\n\", index, tag, err)\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d fix32 %d\\n\", index, tag, u)\n\n\t\tcase WireFixed64:\n\t\t\tu, err = p.DecodeFixed64()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"%3d: t=%3d fix64 err %v\\n\", index, tag, err)\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d fix64 %d\\n\", index, tag, u)\n\t\t\tbreak\n\n\t\tcase WireVarint:\n\t\t\tu, err = p.DecodeVarint()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"%3d: t=%3d varint err %v\\n\", index, tag, err)\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d 
varint %d\\n\", index, tag, u)\n\n\t\tcase WireStartGroup:\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"%3d: t=%3d start err %v\\n\", index, tag, err)\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d start\\n\", index, tag)\n\t\t\tdepth++\n\n\t\tcase WireEndGroup:\n\t\t\tdepth--\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"%3d: t=%3d end err %v\\n\", index, tag, err)\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d end\\n\", index, tag)\n\t\t}\n\t}\n\n\tif depth != 0 {\n\t\tfmt.Printf(\"%3d: start-end not balanced %d\\n\", p.index, depth)\n\t}\n\tfmt.Printf(\"\\n\")\n\n\tp.buf = obuf\n\tp.index = index\n}\n\n// SetDefaults sets unset protocol buffer fields to their default values.\n// It only modifies fields that are both unset and have defined defaults.\n// It recursively sets default values in any non-nil sub-messages.\nfunc SetDefaults(pb Message) {\n\tsetDefaults(reflect.ValueOf(pb), true, false)\n}\n\n// v is a pointer to a struct.\nfunc setDefaults(v reflect.Value, recur, zeros bool) {\n\tv = v.Elem()\n\n\tdefaultMu.RLock()\n\tdm, ok := defaults[v.Type()]\n\tdefaultMu.RUnlock()\n\tif !ok {\n\t\tdm = buildDefaultMessage(v.Type())\n\t\tdefaultMu.Lock()\n\t\tdefaults[v.Type()] = dm\n\t\tdefaultMu.Unlock()\n\t}\n\n\tfor _, sf := range dm.scalars {\n\t\tf := v.Field(sf.index)\n\t\tif !f.IsNil() {\n\t\t\t// field already set\n\t\t\tcontinue\n\t\t}\n\t\tdv := sf.value\n\t\tif dv == nil && !zeros {\n\t\t\t// no explicit default, and don't want to set zeros\n\t\t\tcontinue\n\t\t}\n\t\tfptr := f.Addr().Interface() // **T\n\t\t// TODO: Consider batching the allocations we do here.\n\t\tswitch sf.kind {\n\t\tcase reflect.Bool:\n\t\t\tb := new(bool)\n\t\t\tif dv != nil {\n\t\t\t\t*b = dv.(bool)\n\t\t\t}\n\t\t\t*(fptr.(**bool)) = b\n\t\tcase reflect.Float32:\n\t\t\tf := new(float32)\n\t\t\tif dv != nil {\n\t\t\t\t*f = dv.(float32)\n\t\t\t}\n\t\t\t*(fptr.(**float32)) = f\n\t\tcase reflect.Float64:\n\t\t\tf := new(float64)\n\t\t\tif dv != nil 
{\n\t\t\t\t*f = dv.(float64)\n\t\t\t}\n\t\t\t*(fptr.(**float64)) = f\n\t\tcase reflect.Int32:\n\t\t\t// might be an enum\n\t\t\tif ft := f.Type(); ft != int32PtrType {\n\t\t\t\t// enum\n\t\t\t\tf.Set(reflect.New(ft.Elem()))\n\t\t\t\tif dv != nil {\n\t\t\t\t\tf.Elem().SetInt(int64(dv.(int32)))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// int32 field\n\t\t\t\ti := new(int32)\n\t\t\t\tif dv != nil {\n\t\t\t\t\t*i = dv.(int32)\n\t\t\t\t}\n\t\t\t\t*(fptr.(**int32)) = i\n\t\t\t}\n\t\tcase reflect.Int64:\n\t\t\ti := new(int64)\n\t\t\tif dv != nil {\n\t\t\t\t*i = dv.(int64)\n\t\t\t}\n\t\t\t*(fptr.(**int64)) = i\n\t\tcase reflect.String:\n\t\t\ts := new(string)\n\t\t\tif dv != nil {\n\t\t\t\t*s = dv.(string)\n\t\t\t}\n\t\t\t*(fptr.(**string)) = s\n\t\tcase reflect.Uint8:\n\t\t\t// exceptional case: []byte\n\t\t\tvar b []byte\n\t\t\tif dv != nil {\n\t\t\t\tdb := dv.([]byte)\n\t\t\t\tb = make([]byte, len(db))\n\t\t\t\tcopy(b, db)\n\t\t\t} else {\n\t\t\t\tb = []byte{}\n\t\t\t}\n\t\t\t*(fptr.(*[]byte)) = b\n\t\tcase reflect.Uint32:\n\t\t\tu := new(uint32)\n\t\t\tif dv != nil {\n\t\t\t\t*u = dv.(uint32)\n\t\t\t}\n\t\t\t*(fptr.(**uint32)) = u\n\t\tcase reflect.Uint64:\n\t\t\tu := new(uint64)\n\t\t\tif dv != nil {\n\t\t\t\t*u = dv.(uint64)\n\t\t\t}\n\t\t\t*(fptr.(**uint64)) = u\n\t\tdefault:\n\t\t\tlog.Printf(\"proto: can't set default for field %v (sf.kind=%v)\", f, sf.kind)\n\t\t}\n\t}\n\n\tfor _, ni := range dm.nested {\n\t\tf := v.Field(ni)\n\t\t// f is *T or []*T or map[T]*T\n\t\tswitch f.Kind() {\n\t\tcase reflect.Ptr:\n\t\t\tif f.IsNil() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsetDefaults(f, recur, zeros)\n\n\t\tcase reflect.Slice:\n\t\t\tfor i := 0; i < f.Len(); i++ {\n\t\t\t\te := f.Index(i)\n\t\t\t\tif e.IsNil() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tsetDefaults(e, recur, zeros)\n\t\t\t}\n\n\t\tcase reflect.Map:\n\t\t\tfor _, k := range f.MapKeys() {\n\t\t\t\te := f.MapIndex(k)\n\t\t\t\tif e.IsNil() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tsetDefaults(e, recur, 
zeros)\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar (\n\t// defaults maps a protocol buffer struct type to a slice of the fields,\n\t// with its scalar fields set to their proto-declared non-zero default values.\n\tdefaultMu sync.RWMutex\n\tdefaults  = make(map[reflect.Type]defaultMessage)\n\n\tint32PtrType = reflect.TypeOf((*int32)(nil))\n)\n\n// defaultMessage represents information about the default values of a message.\ntype defaultMessage struct {\n\tscalars []scalarField\n\tnested  []int // struct field index of nested messages\n}\n\ntype scalarField struct {\n\tindex int          // struct field index\n\tkind  reflect.Kind // element type (the T in *T or []T)\n\tvalue interface{}  // the proto-declared default value, or nil\n}\n\n// t is a struct type.\nfunc buildDefaultMessage(t reflect.Type) (dm defaultMessage) {\n\tsprop := GetProperties(t)\n\tfor _, prop := range sprop.Prop {\n\t\tfi, ok := sprop.decoderTags.get(prop.Tag)\n\t\tif !ok {\n\t\t\t// XXX_unrecognized\n\t\t\tcontinue\n\t\t}\n\t\tft := t.Field(fi).Type\n\n\t\tsf, nested, err := fieldDefault(ft, prop)\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\tlog.Print(err)\n\t\tcase nested:\n\t\t\tdm.nested = append(dm.nested, fi)\n\t\tcase sf != nil:\n\t\t\tsf.index = fi\n\t\t\tdm.scalars = append(dm.scalars, *sf)\n\t\t}\n\t}\n\n\treturn dm\n}\n\n// fieldDefault returns the scalarField for field type ft.\n// sf will be nil if the field can not have a default.\n// nestedMessage will be true if this is a nested message.\n// Note that sf.index is not set on return.\nfunc fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {\n\tvar canHaveDefault bool\n\tswitch ft.Kind() {\n\tcase reflect.Ptr:\n\t\tif ft.Elem().Kind() == reflect.Struct {\n\t\t\tnestedMessage = true\n\t\t} else {\n\t\t\tcanHaveDefault = true // proto2 scalar field\n\t\t}\n\n\tcase reflect.Slice:\n\t\tswitch ft.Elem().Kind() {\n\t\tcase reflect.Ptr:\n\t\t\tnestedMessage = true // repeated message\n\t\tcase 
reflect.Uint8:\n\t\t\tcanHaveDefault = true // bytes field\n\t\t}\n\n\tcase reflect.Map:\n\t\tif ft.Elem().Kind() == reflect.Ptr {\n\t\t\tnestedMessage = true // map with message values\n\t\t}\n\t}\n\n\tif !canHaveDefault {\n\t\tif nestedMessage {\n\t\t\treturn nil, true, nil\n\t\t}\n\t\treturn nil, false, nil\n\t}\n\n\t// We now know that ft is a pointer or slice.\n\tsf = &scalarField{kind: ft.Elem().Kind()}\n\n\t// scalar fields without defaults\n\tif !prop.HasDefault {\n\t\treturn sf, false, nil\n\t}\n\n\t// a scalar field: either *T or []byte\n\tswitch ft.Elem().Kind() {\n\tcase reflect.Bool:\n\t\tx, err := strconv.ParseBool(prop.Default)\n\t\tif err != nil {\n\t\t\treturn nil, false, fmt.Errorf(\"proto: bad default bool %q: %v\", prop.Default, err)\n\t\t}\n\t\tsf.value = x\n\tcase reflect.Float32:\n\t\tx, err := strconv.ParseFloat(prop.Default, 32)\n\t\tif err != nil {\n\t\t\treturn nil, false, fmt.Errorf(\"proto: bad default float32 %q: %v\", prop.Default, err)\n\t\t}\n\t\tsf.value = float32(x)\n\tcase reflect.Float64:\n\t\tx, err := strconv.ParseFloat(prop.Default, 64)\n\t\tif err != nil {\n\t\t\treturn nil, false, fmt.Errorf(\"proto: bad default float64 %q: %v\", prop.Default, err)\n\t\t}\n\t\tsf.value = x\n\tcase reflect.Int32:\n\t\tx, err := strconv.ParseInt(prop.Default, 10, 32)\n\t\tif err != nil {\n\t\t\treturn nil, false, fmt.Errorf(\"proto: bad default int32 %q: %v\", prop.Default, err)\n\t\t}\n\t\tsf.value = int32(x)\n\tcase reflect.Int64:\n\t\tx, err := strconv.ParseInt(prop.Default, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, false, fmt.Errorf(\"proto: bad default int64 %q: %v\", prop.Default, err)\n\t\t}\n\t\tsf.value = x\n\tcase reflect.String:\n\t\tsf.value = prop.Default\n\tcase reflect.Uint8:\n\t\t// []byte (not *uint8)\n\t\tsf.value = []byte(prop.Default)\n\tcase reflect.Uint32:\n\t\tx, err := strconv.ParseUint(prop.Default, 10, 32)\n\t\tif err != nil {\n\t\t\treturn nil, false, fmt.Errorf(\"proto: bad default uint32 %q: %v\", 
prop.Default, err)\n\t\t}\n\t\tsf.value = uint32(x)\n\tcase reflect.Uint64:\n\t\tx, err := strconv.ParseUint(prop.Default, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, false, fmt.Errorf(\"proto: bad default uint64 %q: %v\", prop.Default, err)\n\t\t}\n\t\tsf.value = x\n\tdefault:\n\t\treturn nil, false, fmt.Errorf(\"proto: unhandled def kind %v\", ft.Elem().Kind())\n\t}\n\n\treturn sf, false, nil\n}\n\n// Map fields may have key types of non-float scalars, strings and enums.\n// The easiest way to sort them in some deterministic order is to use fmt.\n// If this turns out to be inefficient we can always consider other options,\n// such as doing a Schwartzian transform.\n\ntype mapKeys []reflect.Value\n\nfunc (s mapKeys) Len() int      { return len(s) }\nfunc (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\nfunc (s mapKeys) Less(i, j int) bool {\n\treturn fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/message_set.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n/*\n * Support for message sets.\n */\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"sort\"\n)\n\n// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID.\n// A message type ID is required for storing a protocol buffer in a message set.\nvar ErrNoMessageTypeId = errors.New(\"proto does not have a message type ID\")\n\n// The first two types (_MessageSet_Item and MessageSet)\n// model what the protocol compiler produces for the following protocol message:\n//   message MessageSet {\n//     repeated group Item = 1 {\n//       required int32 type_id = 2;\n//       required string message = 3;\n//     };\n//   }\n// That is the MessageSet wire format. 
We can't use a proto to generate these\n// because that would introduce a circular dependency between it and this package.\n//\n// When a proto1 proto has a field that looks like:\n//   optional message<MessageSet> info = 3;\n// the protocol compiler produces a field in the generated struct that looks like:\n//   Info *_proto_.MessageSet  `protobuf:\"bytes,3,opt,name=info\"`\n// The package is automatically inserted so there is no need for that proto file to\n// import this package.\n\ntype _MessageSet_Item struct {\n\tTypeId  *int32 `protobuf:\"varint,2,req,name=type_id\"`\n\tMessage []byte `protobuf:\"bytes,3,req,name=message\"`\n}\n\ntype MessageSet struct {\n\tItem             []*_MessageSet_Item `protobuf:\"group,1,rep\"`\n\tXXX_unrecognized []byte\n\t// TODO: caching?\n}\n\n// Make sure MessageSet is a Message.\nvar _ Message = (*MessageSet)(nil)\n\n// messageTypeIder is an interface satisfied by a protocol buffer type\n// that may be stored in a MessageSet.\ntype messageTypeIder interface {\n\tMessageTypeId() int32\n}\n\nfunc (ms *MessageSet) find(pb Message) *_MessageSet_Item {\n\tmti, ok := pb.(messageTypeIder)\n\tif !ok {\n\t\treturn nil\n\t}\n\tid := mti.MessageTypeId()\n\tfor _, item := range ms.Item {\n\t\tif *item.TypeId == id {\n\t\t\treturn item\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (ms *MessageSet) Has(pb Message) bool {\n\tif ms.find(pb) != nil {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (ms *MessageSet) Unmarshal(pb Message) error {\n\tif item := ms.find(pb); item != nil {\n\t\treturn Unmarshal(item.Message, pb)\n\t}\n\tif _, ok := pb.(messageTypeIder); !ok {\n\t\treturn ErrNoMessageTypeId\n\t}\n\treturn nil // TODO: return error instead?\n}\n\nfunc (ms *MessageSet) Marshal(pb Message) error {\n\tmsg, err := Marshal(pb)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif item := ms.find(pb); item != nil {\n\t\t// reuse existing item\n\t\titem.Message = msg\n\t\treturn nil\n\t}\n\n\tmti, ok := pb.(messageTypeIder)\n\tif !ok {\n\t\treturn 
ErrNoMessageTypeId\n\t}\n\n\tmtid := mti.MessageTypeId()\n\tms.Item = append(ms.Item, &_MessageSet_Item{\n\t\tTypeId:  &mtid,\n\t\tMessage: msg,\n\t})\n\treturn nil\n}\n\nfunc (ms *MessageSet) Reset()         { *ms = MessageSet{} }\nfunc (ms *MessageSet) String() string { return CompactTextString(ms) }\nfunc (*MessageSet) ProtoMessage()     {}\n\n// Support for the message_set_wire_format message option.\n\nfunc skipVarint(buf []byte) []byte {\n\ti := 0\n\tfor ; buf[i]&0x80 != 0; i++ {\n\t}\n\treturn buf[i+1:]\n}\n\n// MarshalMessageSet encodes the extension map represented by m in the message set wire format.\n// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.\nfunc MarshalMessageSet(m map[int32]Extension) ([]byte, error) {\n\tif err := encodeExtensionMap(m); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Sort extension IDs to provide a deterministic encoding.\n\t// See also enc_map in encode.go.\n\tids := make([]int, 0, len(m))\n\tfor id := range m {\n\t\tids = append(ids, int(id))\n\t}\n\tsort.Ints(ids)\n\n\tms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))}\n\tfor _, id := range ids {\n\t\te := m[int32(id)]\n\t\t// Remove the wire type and field number varint, as well as the length varint.\n\t\tmsg := skipVarint(skipVarint(e.enc))\n\n\t\tms.Item = append(ms.Item, &_MessageSet_Item{\n\t\t\tTypeId:  Int32(int32(id)),\n\t\t\tMessage: msg,\n\t\t})\n\t}\n\treturn Marshal(ms)\n}\n\n// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.\n// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.\nfunc UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {\n\tms := new(MessageSet)\n\tif err := Unmarshal(buf, ms); err != nil {\n\t\treturn err\n\t}\n\tfor _, item := range ms.Item {\n\t\tid := *item.TypeId\n\t\tmsg := item.Message\n\n\t\t// Restore wire type and field number varint, plus length 
varint.\n\t\t// Be careful to preserve duplicate items.\n\t\tb := EncodeVarint(uint64(id)<<3 | WireBytes)\n\t\tif ext, ok := m[id]; ok {\n\t\t\t// Existing data; rip off the tag and length varint\n\t\t\t// so we join the new data correctly.\n\t\t\t// We can assume that ext.enc is set because we are unmarshaling.\n\t\t\to := ext.enc[len(b):]   // skip wire type and field number\n\t\t\t_, n := DecodeVarint(o) // calculate length of length varint\n\t\t\to = o[n:]               // skip length varint\n\t\t\tmsg = append(o, msg...) // join old data and new data\n\t\t}\n\t\tb = append(b, EncodeVarint(uint64(len(msg)))...)\n\t\tb = append(b, msg...)\n\n\t\tm[id] = Extension{enc: b}\n\t}\n\treturn nil\n}\n\n// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.\n// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.\nfunc MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {\n\tvar b bytes.Buffer\n\tb.WriteByte('{')\n\n\t// Process the map in key order for deterministic output.\n\tids := make([]int32, 0, len(m))\n\tfor id := range m {\n\t\tids = append(ids, id)\n\t}\n\tsort.Sort(int32Slice(ids)) // int32Slice defined in text.go\n\n\tfor i, id := range ids {\n\t\text := m[id]\n\t\tif i > 0 {\n\t\t\tb.WriteByte(',')\n\t\t}\n\n\t\tmsd, ok := messageSetMap[id]\n\t\tif !ok {\n\t\t\t// Unknown type; we can't render it, so skip it.\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(&b, `\"[%s]\":`, msd.name)\n\n\t\tx := ext.value\n\t\tif x == nil {\n\t\t\tx = reflect.New(msd.t.Elem()).Interface()\n\t\t\tif err := Unmarshal(ext.enc, x.(Message)); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\td, err := json.Marshal(x)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tb.Write(d)\n\t}\n\tb.WriteByte('}')\n\treturn b.Bytes(), nil\n}\n\n// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.\n// It is called by generated UnmarshalJSON methods on 
protocol buffer messages with the message_set_wire_format option.\nfunc UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {\n\t// Common-case fast path.\n\tif len(buf) == 0 || bytes.Equal(buf, []byte(\"{}\")) {\n\t\treturn nil\n\t}\n\n\t// This is fairly tricky, and it's not clear that it is needed.\n\treturn errors.New(\"TODO: UnmarshalMessageSetJSON not yet implemented\")\n}\n\n// A global registry of types that can be used in a MessageSet.\n\nvar messageSetMap = make(map[int32]messageSetDesc)\n\ntype messageSetDesc struct {\n\tt    reflect.Type // pointer to struct\n\tname string\n}\n\n// RegisterMessageSetType is called from the generated code.\nfunc RegisterMessageSetType(m Message, fieldNum int32, name string) {\n\tmessageSetMap[fieldNum] = messageSetDesc{\n\t\tt:    reflect.TypeOf(m),\n\t\tname: name,\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/pointer_reflect.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2012 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// +build appengine\n\n// This file contains an implementation of proto field accesses using package reflect.\n// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can\n// be used on App Engine.\n\npackage proto\n\nimport (\n\t\"math\"\n\t\"reflect\"\n)\n\n// A structPointer is a pointer to a struct.\ntype structPointer struct {\n\tv reflect.Value\n}\n\n// toStructPointer returns a structPointer equivalent to the given reflect value.\n// The reflect value must itself be a pointer to a struct.\nfunc toStructPointer(v reflect.Value) structPointer {\n\treturn structPointer{v}\n}\n\n// IsNil reports whether p is nil.\nfunc structPointer_IsNil(p structPointer) bool {\n\treturn p.v.IsNil()\n}\n\n// Interface returns the struct pointer as an interface value.\nfunc structPointer_Interface(p structPointer, _ reflect.Type) interface{} {\n\treturn p.v.Interface()\n}\n\n// A field identifies a field in a struct, accessible from a structPointer.\n// In this implementation, a field is identified by the sequence of field indices\n// passed to reflect's FieldByIndex.\ntype field []int\n\n// toField returns a field equivalent to the given reflect field.\nfunc toField(f *reflect.StructField) field {\n\treturn f.Index\n}\n\n// invalidField is an invalid field identifier.\nvar invalidField = field(nil)\n\n// IsValid reports whether the field identifier is valid.\nfunc (f field) IsValid() bool { return f != nil }\n\n// 
field returns the given field in the struct as a reflect value.\nfunc structPointer_field(p structPointer, f field) reflect.Value {\n\t// Special case: an extension map entry with a value of type T\n\t// passes a *T to the struct-handling code with a zero field,\n\t// expecting that it will be treated as equivalent to *struct{ X T },\n\t// which has the same memory layout. We have to handle that case\n\t// specially, because reflect will panic if we call FieldByIndex on a\n\t// non-struct.\n\tif f == nil {\n\t\treturn p.v.Elem()\n\t}\n\n\treturn p.v.Elem().FieldByIndex(f)\n}\n\n// ifield returns the given field in the struct as an interface value.\nfunc structPointer_ifield(p structPointer, f field) interface{} {\n\treturn structPointer_field(p, f).Addr().Interface()\n}\n\n// Bytes returns the address of a []byte field in the struct.\nfunc structPointer_Bytes(p structPointer, f field) *[]byte {\n\treturn structPointer_ifield(p, f).(*[]byte)\n}\n\n// BytesSlice returns the address of a [][]byte field in the struct.\nfunc structPointer_BytesSlice(p structPointer, f field) *[][]byte {\n\treturn structPointer_ifield(p, f).(*[][]byte)\n}\n\n// Bool returns the address of a *bool field in the struct.\nfunc structPointer_Bool(p structPointer, f field) **bool {\n\treturn structPointer_ifield(p, f).(**bool)\n}\n\n// BoolVal returns the address of a bool field in the struct.\nfunc structPointer_BoolVal(p structPointer, f field) *bool {\n\treturn structPointer_ifield(p, f).(*bool)\n}\n\n// BoolSlice returns the address of a []bool field in the struct.\nfunc structPointer_BoolSlice(p structPointer, f field) *[]bool {\n\treturn structPointer_ifield(p, f).(*[]bool)\n}\n\n// String returns the address of a *string field in the struct.\nfunc structPointer_String(p structPointer, f field) **string {\n\treturn structPointer_ifield(p, f).(**string)\n}\n\n// StringVal returns the address of a string field in the struct.\nfunc structPointer_StringVal(p structPointer, f field) *string 
{\n\treturn structPointer_ifield(p, f).(*string)\n}\n\n// StringSlice returns the address of a []string field in the struct.\nfunc structPointer_StringSlice(p structPointer, f field) *[]string {\n\treturn structPointer_ifield(p, f).(*[]string)\n}\n\n// ExtMap returns the address of an extension map field in the struct.\nfunc structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {\n\treturn structPointer_ifield(p, f).(*map[int32]Extension)\n}\n\n// Map returns the reflect.Value for the address of a map field in the struct.\nfunc structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {\n\treturn structPointer_field(p, f).Addr()\n}\n\n// SetStructPointer writes a *struct field in the struct.\nfunc structPointer_SetStructPointer(p structPointer, f field, q structPointer) {\n\tstructPointer_field(p, f).Set(q.v)\n}\n\n// GetStructPointer reads a *struct field in the struct.\nfunc structPointer_GetStructPointer(p structPointer, f field) structPointer {\n\treturn structPointer{structPointer_field(p, f)}\n}\n\n// StructPointerSlice the address of a []*struct field in the struct.\nfunc structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {\n\treturn structPointerSlice{structPointer_field(p, f)}\n}\n\n// A structPointerSlice represents the address of a slice of pointers to structs\n// (themselves messages or groups). 
That is, v.Type() is *[]*struct{...}.\ntype structPointerSlice struct {\n\tv reflect.Value\n}\n\nfunc (p structPointerSlice) Len() int                  { return p.v.Len() }\nfunc (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }\nfunc (p structPointerSlice) Append(q structPointer) {\n\tp.v.Set(reflect.Append(p.v, q.v))\n}\n\nvar (\n\tint32Type   = reflect.TypeOf(int32(0))\n\tuint32Type  = reflect.TypeOf(uint32(0))\n\tfloat32Type = reflect.TypeOf(float32(0))\n\tint64Type   = reflect.TypeOf(int64(0))\n\tuint64Type  = reflect.TypeOf(uint64(0))\n\tfloat64Type = reflect.TypeOf(float64(0))\n)\n\n// A word32 represents a field of type *int32, *uint32, *float32, or *enum.\n// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.\ntype word32 struct {\n\tv reflect.Value\n}\n\n// IsNil reports whether p is nil.\nfunc word32_IsNil(p word32) bool {\n\treturn p.v.IsNil()\n}\n\n// Set sets p to point at a newly allocated word with bits set to x.\nfunc word32_Set(p word32, o *Buffer, x uint32) {\n\tt := p.v.Type().Elem()\n\tswitch t {\n\tcase int32Type:\n\t\tif len(o.int32s) == 0 {\n\t\t\to.int32s = make([]int32, uint32PoolSize)\n\t\t}\n\t\to.int32s[0] = int32(x)\n\t\tp.v.Set(reflect.ValueOf(&o.int32s[0]))\n\t\to.int32s = o.int32s[1:]\n\t\treturn\n\tcase uint32Type:\n\t\tif len(o.uint32s) == 0 {\n\t\t\to.uint32s = make([]uint32, uint32PoolSize)\n\t\t}\n\t\to.uint32s[0] = x\n\t\tp.v.Set(reflect.ValueOf(&o.uint32s[0]))\n\t\to.uint32s = o.uint32s[1:]\n\t\treturn\n\tcase float32Type:\n\t\tif len(o.float32s) == 0 {\n\t\t\to.float32s = make([]float32, uint32PoolSize)\n\t\t}\n\t\to.float32s[0] = math.Float32frombits(x)\n\t\tp.v.Set(reflect.ValueOf(&o.float32s[0]))\n\t\to.float32s = o.float32s[1:]\n\t\treturn\n\t}\n\n\t// must be enum\n\tp.v.Set(reflect.New(t))\n\tp.v.Elem().SetInt(int64(int32(x)))\n}\n\n// Get gets the bits pointed at by p, as a uint32.\nfunc word32_Get(p word32) uint32 {\n\telem := p.v.Elem()\n\tswitch 
elem.Kind() {\n\tcase reflect.Int32:\n\t\treturn uint32(elem.Int())\n\tcase reflect.Uint32:\n\t\treturn uint32(elem.Uint())\n\tcase reflect.Float32:\n\t\treturn math.Float32bits(float32(elem.Float()))\n\t}\n\tpanic(\"unreachable\")\n}\n\n// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.\nfunc structPointer_Word32(p structPointer, f field) word32 {\n\treturn word32{structPointer_field(p, f)}\n}\n\n// A word32Val represents a field of type int32, uint32, float32, or enum.\n// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.\ntype word32Val struct {\n\tv reflect.Value\n}\n\n// Set sets *p to x.\nfunc word32Val_Set(p word32Val, x uint32) {\n\tswitch p.v.Type() {\n\tcase int32Type:\n\t\tp.v.SetInt(int64(x))\n\t\treturn\n\tcase uint32Type:\n\t\tp.v.SetUint(uint64(x))\n\t\treturn\n\tcase float32Type:\n\t\tp.v.SetFloat(float64(math.Float32frombits(x)))\n\t\treturn\n\t}\n\n\t// must be enum\n\tp.v.SetInt(int64(int32(x)))\n}\n\n// Get gets the bits pointed at by p, as a uint32.\nfunc word32Val_Get(p word32Val) uint32 {\n\telem := p.v\n\tswitch elem.Kind() {\n\tcase reflect.Int32:\n\t\treturn uint32(elem.Int())\n\tcase reflect.Uint32:\n\t\treturn uint32(elem.Uint())\n\tcase reflect.Float32:\n\t\treturn math.Float32bits(float32(elem.Float()))\n\t}\n\tpanic(\"unreachable\")\n}\n\n// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.\nfunc structPointer_Word32Val(p structPointer, f field) word32Val {\n\treturn word32Val{structPointer_field(p, f)}\n}\n\n// A word32Slice is a slice of 32-bit values.\n// That is, v.Type() is []int32, []uint32, []float32, or []enum.\ntype word32Slice struct {\n\tv reflect.Value\n}\n\nfunc (p word32Slice) Append(x uint32) {\n\tn, m := p.v.Len(), p.v.Cap()\n\tif n < m {\n\t\tp.v.SetLen(n + 1)\n\t} else {\n\t\tt := p.v.Type().Elem()\n\t\tp.v.Set(reflect.Append(p.v, reflect.Zero(t)))\n\t}\n\telem := p.v.Index(n)\n\tswitch elem.Kind() {\n\tcase 
reflect.Int32:\n\t\telem.SetInt(int64(int32(x)))\n\tcase reflect.Uint32:\n\t\telem.SetUint(uint64(x))\n\tcase reflect.Float32:\n\t\telem.SetFloat(float64(math.Float32frombits(x)))\n\t}\n}\n\nfunc (p word32Slice) Len() int {\n\treturn p.v.Len()\n}\n\nfunc (p word32Slice) Index(i int) uint32 {\n\telem := p.v.Index(i)\n\tswitch elem.Kind() {\n\tcase reflect.Int32:\n\t\treturn uint32(elem.Int())\n\tcase reflect.Uint32:\n\t\treturn uint32(elem.Uint())\n\tcase reflect.Float32:\n\t\treturn math.Float32bits(float32(elem.Float()))\n\t}\n\tpanic(\"unreachable\")\n}\n\n// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.\nfunc structPointer_Word32Slice(p structPointer, f field) word32Slice {\n\treturn word32Slice{structPointer_field(p, f)}\n}\n\n// word64 is like word32 but for 64-bit values.\ntype word64 struct {\n\tv reflect.Value\n}\n\nfunc word64_Set(p word64, o *Buffer, x uint64) {\n\tt := p.v.Type().Elem()\n\tswitch t {\n\tcase int64Type:\n\t\tif len(o.int64s) == 0 {\n\t\t\to.int64s = make([]int64, uint64PoolSize)\n\t\t}\n\t\to.int64s[0] = int64(x)\n\t\tp.v.Set(reflect.ValueOf(&o.int64s[0]))\n\t\to.int64s = o.int64s[1:]\n\t\treturn\n\tcase uint64Type:\n\t\tif len(o.uint64s) == 0 {\n\t\t\to.uint64s = make([]uint64, uint64PoolSize)\n\t\t}\n\t\to.uint64s[0] = x\n\t\tp.v.Set(reflect.ValueOf(&o.uint64s[0]))\n\t\to.uint64s = o.uint64s[1:]\n\t\treturn\n\tcase float64Type:\n\t\tif len(o.float64s) == 0 {\n\t\t\to.float64s = make([]float64, uint64PoolSize)\n\t\t}\n\t\to.float64s[0] = math.Float64frombits(x)\n\t\tp.v.Set(reflect.ValueOf(&o.float64s[0]))\n\t\to.float64s = o.float64s[1:]\n\t\treturn\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc word64_IsNil(p word64) bool {\n\treturn p.v.IsNil()\n}\n\nfunc word64_Get(p word64) uint64 {\n\telem := p.v.Elem()\n\tswitch elem.Kind() {\n\tcase reflect.Int64:\n\t\treturn uint64(elem.Int())\n\tcase reflect.Uint64:\n\t\treturn elem.Uint()\n\tcase reflect.Float64:\n\t\treturn 
math.Float64bits(elem.Float())\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc structPointer_Word64(p structPointer, f field) word64 {\n\treturn word64{structPointer_field(p, f)}\n}\n\n// word64Val is like word32Val but for 64-bit values.\ntype word64Val struct {\n\tv reflect.Value\n}\n\nfunc word64Val_Set(p word64Val, o *Buffer, x uint64) {\n\tswitch p.v.Type() {\n\tcase int64Type:\n\t\tp.v.SetInt(int64(x))\n\t\treturn\n\tcase uint64Type:\n\t\tp.v.SetUint(x)\n\t\treturn\n\tcase float64Type:\n\t\tp.v.SetFloat(math.Float64frombits(x))\n\t\treturn\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc word64Val_Get(p word64Val) uint64 {\n\telem := p.v\n\tswitch elem.Kind() {\n\tcase reflect.Int64:\n\t\treturn uint64(elem.Int())\n\tcase reflect.Uint64:\n\t\treturn elem.Uint()\n\tcase reflect.Float64:\n\t\treturn math.Float64bits(elem.Float())\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc structPointer_Word64Val(p structPointer, f field) word64Val {\n\treturn word64Val{structPointer_field(p, f)}\n}\n\ntype word64Slice struct {\n\tv reflect.Value\n}\n\nfunc (p word64Slice) Append(x uint64) {\n\tn, m := p.v.Len(), p.v.Cap()\n\tif n < m {\n\t\tp.v.SetLen(n + 1)\n\t} else {\n\t\tt := p.v.Type().Elem()\n\t\tp.v.Set(reflect.Append(p.v, reflect.Zero(t)))\n\t}\n\telem := p.v.Index(n)\n\tswitch elem.Kind() {\n\tcase reflect.Int64:\n\t\telem.SetInt(int64(int64(x)))\n\tcase reflect.Uint64:\n\t\telem.SetUint(uint64(x))\n\tcase reflect.Float64:\n\t\telem.SetFloat(float64(math.Float64frombits(x)))\n\t}\n}\n\nfunc (p word64Slice) Len() int {\n\treturn p.v.Len()\n}\n\nfunc (p word64Slice) Index(i int) uint64 {\n\telem := p.v.Index(i)\n\tswitch elem.Kind() {\n\tcase reflect.Int64:\n\t\treturn uint64(elem.Int())\n\tcase reflect.Uint64:\n\t\treturn uint64(elem.Uint())\n\tcase reflect.Float64:\n\t\treturn math.Float64bits(float64(elem.Float()))\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc structPointer_Word64Slice(p structPointer, f field) word64Slice {\n\treturn word64Slice{structPointer_field(p, f)}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/pointer_unsafe.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2012 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// +build !appengine\n\n// This file contains the implementation of the proto field accesses using package unsafe.\n\npackage proto\n\nimport (\n\t\"reflect\"\n\t\"unsafe\"\n)\n\n// NOTE: These type_Foo functions would more idiomatically be methods,\n// but Go does not allow methods on pointer types, and we must preserve\n// some pointer type for the garbage collector. We use these\n// funcs with clunky names as our poor approximation to methods.\n//\n// An alternative would be\n//\ttype structPointer struct { p unsafe.Pointer }\n// but that does not registerize as well.\n\n// A structPointer is a pointer to a struct.\ntype structPointer unsafe.Pointer\n\n// toStructPointer returns a structPointer equivalent to the given reflect value.\nfunc toStructPointer(v reflect.Value) structPointer {\n\treturn structPointer(unsafe.Pointer(v.Pointer()))\n}\n\n// IsNil reports whether p is nil.\nfunc structPointer_IsNil(p structPointer) bool {\n\treturn p == nil\n}\n\n// Interface returns the struct pointer, assumed to have element type t,\n// as an interface value.\nfunc structPointer_Interface(p structPointer, t reflect.Type) interface{} {\n\treturn reflect.NewAt(t, unsafe.Pointer(p)).Interface()\n}\n\n// A field identifies a field in a struct, accessible from a structPointer.\n// In this implementation, a field is identified by its byte offset from the start of the struct.\ntype field uintptr\n\n// toField returns a field equivalent to the 
given reflect field.\nfunc toField(f *reflect.StructField) field {\n\treturn field(f.Offset)\n}\n\n// invalidField is an invalid field identifier.\nconst invalidField = ^field(0)\n\n// IsValid reports whether the field identifier is valid.\nfunc (f field) IsValid() bool {\n\treturn f != ^field(0)\n}\n\n// Bytes returns the address of a []byte field in the struct.\nfunc structPointer_Bytes(p structPointer, f field) *[]byte {\n\treturn (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// BytesSlice returns the address of a [][]byte field in the struct.\nfunc structPointer_BytesSlice(p structPointer, f field) *[][]byte {\n\treturn (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// Bool returns the address of a *bool field in the struct.\nfunc structPointer_Bool(p structPointer, f field) **bool {\n\treturn (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// BoolVal returns the address of a bool field in the struct.\nfunc structPointer_BoolVal(p structPointer, f field) *bool {\n\treturn (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// BoolSlice returns the address of a []bool field in the struct.\nfunc structPointer_BoolSlice(p structPointer, f field) *[]bool {\n\treturn (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// String returns the address of a *string field in the struct.\nfunc structPointer_String(p structPointer, f field) **string {\n\treturn (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// StringVal returns the address of a string field in the struct.\nfunc structPointer_StringVal(p structPointer, f field) *string {\n\treturn (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// StringSlice returns the address of a []string field in the struct.\nfunc structPointer_StringSlice(p structPointer, f field) *[]string {\n\treturn (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// ExtMap returns the address of an extension map field in the struct.\nfunc structPointer_ExtMap(p structPointer, f 
field) *map[int32]Extension {\n\treturn (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// Map returns the reflect.Value for the address of a map field in the struct.\nfunc structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {\n\treturn reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))\n}\n\n// SetStructPointer writes a *struct field in the struct.\nfunc structPointer_SetStructPointer(p structPointer, f field, q structPointer) {\n\t*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q\n}\n\n// GetStructPointer reads a *struct field in the struct.\nfunc structPointer_GetStructPointer(p structPointer, f field) structPointer {\n\treturn *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// StructPointerSlice the address of a []*struct field in the struct.\nfunc structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {\n\treturn (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).\ntype structPointerSlice []structPointer\n\nfunc (v *structPointerSlice) Len() int                  { return len(*v) }\nfunc (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }\nfunc (v *structPointerSlice) Append(p structPointer)    { *v = append(*v, p) }\n\n// A word32 is the address of a \"pointer to 32-bit value\" field.\ntype word32 **uint32\n\n// IsNil reports whether *v is nil.\nfunc word32_IsNil(p word32) bool {\n\treturn *p == nil\n}\n\n// Set sets *v to point at a newly allocated word set to x.\nfunc word32_Set(p word32, o *Buffer, x uint32) {\n\tif len(o.uint32s) == 0 {\n\t\to.uint32s = make([]uint32, uint32PoolSize)\n\t}\n\to.uint32s[0] = x\n\t*p = &o.uint32s[0]\n\to.uint32s = o.uint32s[1:]\n}\n\n// Get gets the value pointed at by *v.\nfunc word32_Get(p word32) uint32 {\n\treturn **p\n}\n\n// Word32 returns the address of a *int32, *uint32, 
*float32, or *enum field in the struct.\nfunc structPointer_Word32(p structPointer, f field) word32 {\n\treturn word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))\n}\n\n// A word32Val is the address of a 32-bit value field.\ntype word32Val *uint32\n\n// Set sets *p to x.\nfunc word32Val_Set(p word32Val, x uint32) {\n\t*p = x\n}\n\n// Get gets the value pointed at by p.\nfunc word32Val_Get(p word32Val) uint32 {\n\treturn *p\n}\n\n// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.\nfunc structPointer_Word32Val(p structPointer, f field) word32Val {\n\treturn word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))\n}\n\n// A word32Slice is a slice of 32-bit values.\ntype word32Slice []uint32\n\nfunc (v *word32Slice) Append(x uint32)    { *v = append(*v, x) }\nfunc (v *word32Slice) Len() int           { return len(*v) }\nfunc (v *word32Slice) Index(i int) uint32 { return (*v)[i] }\n\n// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.\nfunc structPointer_Word32Slice(p structPointer, f field) *word32Slice {\n\treturn (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// word64 is like word32 but for 64-bit values.\ntype word64 **uint64\n\nfunc word64_Set(p word64, o *Buffer, x uint64) {\n\tif len(o.uint64s) == 0 {\n\t\to.uint64s = make([]uint64, uint64PoolSize)\n\t}\n\to.uint64s[0] = x\n\t*p = &o.uint64s[0]\n\to.uint64s = o.uint64s[1:]\n}\n\nfunc word64_IsNil(p word64) bool {\n\treturn *p == nil\n}\n\nfunc word64_Get(p word64) uint64 {\n\treturn **p\n}\n\nfunc structPointer_Word64(p structPointer, f field) word64 {\n\treturn word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))\n}\n\n// word64Val is like word32Val but for 64-bit values.\ntype word64Val *uint64\n\nfunc word64Val_Set(p word64Val, o *Buffer, x uint64) {\n\t*p = x\n}\n\nfunc word64Val_Get(p word64Val) uint64 {\n\treturn *p\n}\n\nfunc structPointer_Word64Val(p structPointer, f field) 
word64Val {\n\treturn word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))\n}\n\n// word64Slice is like word32Slice but for 64-bit values.\ntype word64Slice []uint64\n\nfunc (v *word64Slice) Append(x uint64)    { *v = append(*v, x) }\nfunc (v *word64Slice) Len() int           { return len(*v) }\nfunc (v *word64Slice) Index(i int) uint64 { return (*v)[i] }\n\nfunc structPointer_Word64Slice(p structPointer, f field) *word64Slice {\n\treturn (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/properties.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n/*\n * Routines for encoding data into the wire format for protocol buffers.\n */\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\nconst debug bool = false\n\n// Constants that identify the encoding of a value on the wire.\nconst (\n\tWireVarint     = 0\n\tWireFixed64    = 1\n\tWireBytes      = 2\n\tWireStartGroup = 3\n\tWireEndGroup   = 4\n\tWireFixed32    = 5\n)\n\nconst startSize = 10 // initial slice/string sizes\n\n// Encoders are defined in encode.go\n// An encoder outputs the full representation of a field, including its\n// tag and encoder type.\ntype encoder func(p *Buffer, prop *Properties, base structPointer) error\n\n// A valueEncoder encodes a single integer in a particular encoding.\ntype valueEncoder func(o *Buffer, x uint64) error\n\n// Sizers are defined in encode.go\n// A sizer returns the encoded size of a field, including its tag and encoder\n// type.\ntype sizer func(prop *Properties, base structPointer) int\n\n// A valueSizer returns the encoded size of a single integer in a particular\n// encoding.\ntype valueSizer func(x uint64) int\n\n// Decoders are defined in decode.go\n// A decoder creates a value from its wire representation.\n// Unrecognized subelements are saved in unrec.\ntype decoder func(p *Buffer, prop *Properties, base structPointer) error\n\n// A valueDecoder decodes a single integer in a particular encoding.\ntype 
valueDecoder func(o *Buffer) (x uint64, err error)\n\n// tagMap is an optimization over map[int]int for typical protocol buffer\n// use-cases. Encoded protocol buffers are often in tag order with small tag\n// numbers.\ntype tagMap struct {\n\tfastTags []int\n\tslowTags map[int]int\n}\n\n// tagMapFastLimit is the upper bound on the tag number that will be stored in\n// the tagMap slice rather than its map.\nconst tagMapFastLimit = 1024\n\nfunc (p *tagMap) get(t int) (int, bool) {\n\tif t > 0 && t < tagMapFastLimit {\n\t\tif t >= len(p.fastTags) {\n\t\t\treturn 0, false\n\t\t}\n\t\tfi := p.fastTags[t]\n\t\treturn fi, fi >= 0\n\t}\n\tfi, ok := p.slowTags[t]\n\treturn fi, ok\n}\n\nfunc (p *tagMap) put(t int, fi int) {\n\tif t > 0 && t < tagMapFastLimit {\n\t\tfor len(p.fastTags) < t+1 {\n\t\t\tp.fastTags = append(p.fastTags, -1)\n\t\t}\n\t\tp.fastTags[t] = fi\n\t\treturn\n\t}\n\tif p.slowTags == nil {\n\t\tp.slowTags = make(map[int]int)\n\t}\n\tp.slowTags[t] = fi\n}\n\n// StructProperties represents properties for all the fields of a struct.\n// decoderTags and decoderOrigNames should only be used by the decoder.\ntype StructProperties struct {\n\tProp             []*Properties  // properties for each field\n\treqCount         int            // required count\n\tdecoderTags      tagMap         // map from proto tag to struct field number\n\tdecoderOrigNames map[string]int // map from original name to struct field number\n\torder            []int          // list of struct field numbers in tag order\n\tunrecField       field          // field id of the XXX_unrecognized []byte field\n\textendable       bool           // is this an extendable proto\n}\n\n// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.\n// See encode.go, (*Buffer).enc_struct.\n\nfunc (sp *StructProperties) Len() int { return len(sp.order) }\nfunc (sp *StructProperties) Less(i, j int) bool {\n\treturn sp.Prop[sp.order[i]].Tag < 
sp.Prop[sp.order[j]].Tag\n}\nfunc (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }\n\n// Properties represents the protocol-specific behavior of a single struct field.\ntype Properties struct {\n\tName     string // name of the field, for error messages\n\tOrigName string // original name before protocol compiler (always set)\n\tWire     string\n\tWireType int\n\tTag      int\n\tRequired bool\n\tOptional bool\n\tRepeated bool\n\tPacked   bool   // relevant for repeated primitives only\n\tEnum     string // set for enum types only\n\tproto3   bool   // whether this is known to be a proto3 field; set for []byte only\n\n\tDefault    string // default value\n\tHasDefault bool   // whether an explicit default was provided\n\tdef_uint64 uint64\n\n\tenc           encoder\n\tvalEnc        valueEncoder // set for bool and numeric types only\n\tfield         field\n\ttagcode       []byte // encoding of EncodeVarint((Tag<<3)|WireType)\n\ttagbuf        [8]byte\n\tstype         reflect.Type      // set for struct types only\n\tsprop         *StructProperties // set for struct types only\n\tisMarshaler   bool\n\tisUnmarshaler bool\n\n\tmtype    reflect.Type // set for map types only\n\tmkeyprop *Properties  // set for map types only\n\tmvalprop *Properties  // set for map types only\n\n\tsize    sizer\n\tvalSize valueSizer // set for bool and numeric types only\n\n\tdec    decoder\n\tvalDec valueDecoder // set for bool and numeric types only\n\n\t// If this is a packable field, this will be the decoder for the packed version of the field.\n\tpackedDec decoder\n}\n\n// String formats the properties in the protobuf struct field tag style.\nfunc (p *Properties) String() string {\n\ts := p.Wire\n\ts = \",\"\n\ts += strconv.Itoa(p.Tag)\n\tif p.Required {\n\t\ts += \",req\"\n\t}\n\tif p.Optional {\n\t\ts += \",opt\"\n\t}\n\tif p.Repeated {\n\t\ts += \",rep\"\n\t}\n\tif p.Packed {\n\t\ts += \",packed\"\n\t}\n\tif p.OrigName != p.Name 
{\n\t\ts += \",name=\" + p.OrigName\n\t}\n\tif p.proto3 {\n\t\ts += \",proto3\"\n\t}\n\tif len(p.Enum) > 0 {\n\t\ts += \",enum=\" + p.Enum\n\t}\n\tif p.HasDefault {\n\t\ts += \",def=\" + p.Default\n\t}\n\treturn s\n}\n\n// Parse populates p by parsing a string in the protobuf struct field tag style.\nfunc (p *Properties) Parse(s string) {\n\t// \"bytes,49,opt,name=foo,def=hello!\"\n\tfields := strings.Split(s, \",\") // breaks def=, but handled below.\n\tif len(fields) < 2 {\n\t\tfmt.Fprintf(os.Stderr, \"proto: tag has too few fields: %q\\n\", s)\n\t\treturn\n\t}\n\n\tp.Wire = fields[0]\n\tswitch p.Wire {\n\tcase \"varint\":\n\t\tp.WireType = WireVarint\n\t\tp.valEnc = (*Buffer).EncodeVarint\n\t\tp.valDec = (*Buffer).DecodeVarint\n\t\tp.valSize = sizeVarint\n\tcase \"fixed32\":\n\t\tp.WireType = WireFixed32\n\t\tp.valEnc = (*Buffer).EncodeFixed32\n\t\tp.valDec = (*Buffer).DecodeFixed32\n\t\tp.valSize = sizeFixed32\n\tcase \"fixed64\":\n\t\tp.WireType = WireFixed64\n\t\tp.valEnc = (*Buffer).EncodeFixed64\n\t\tp.valDec = (*Buffer).DecodeFixed64\n\t\tp.valSize = sizeFixed64\n\tcase \"zigzag32\":\n\t\tp.WireType = WireVarint\n\t\tp.valEnc = (*Buffer).EncodeZigzag32\n\t\tp.valDec = (*Buffer).DecodeZigzag32\n\t\tp.valSize = sizeZigzag32\n\tcase \"zigzag64\":\n\t\tp.WireType = WireVarint\n\t\tp.valEnc = (*Buffer).EncodeZigzag64\n\t\tp.valDec = (*Buffer).DecodeZigzag64\n\t\tp.valSize = sizeZigzag64\n\tcase \"bytes\", \"group\":\n\t\tp.WireType = WireBytes\n\t\t// no numeric converter for non-numeric types\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"proto: tag has unknown wire type: %q\\n\", s)\n\t\treturn\n\t}\n\n\tvar err error\n\tp.Tag, err = strconv.Atoi(fields[1])\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor i := 2; i < len(fields); i++ {\n\t\tf := fields[i]\n\t\tswitch {\n\t\tcase f == \"req\":\n\t\t\tp.Required = true\n\t\tcase f == \"opt\":\n\t\t\tp.Optional = true\n\t\tcase f == \"rep\":\n\t\t\tp.Repeated = true\n\t\tcase f == \"packed\":\n\t\t\tp.Packed = 
true\n\t\tcase strings.HasPrefix(f, \"name=\"):\n\t\t\tp.OrigName = f[5:]\n\t\tcase strings.HasPrefix(f, \"enum=\"):\n\t\t\tp.Enum = f[5:]\n\t\tcase f == \"proto3\":\n\t\t\tp.proto3 = true\n\t\tcase strings.HasPrefix(f, \"def=\"):\n\t\t\tp.HasDefault = true\n\t\t\tp.Default = f[4:] // rest of string\n\t\t\tif i+1 < len(fields) {\n\t\t\t\t// Commas aren't escaped, and def is always last.\n\t\t\t\tp.Default += \",\" + strings.Join(fields[i+1:], \",\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc logNoSliceEnc(t1, t2 reflect.Type) {\n\tfmt.Fprintf(os.Stderr, \"proto: no slice oenc for %T = []%T\\n\", t1, t2)\n}\n\nvar protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()\n\n// Initialize the fields for encoding and decoding.\nfunc (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {\n\tp.enc = nil\n\tp.dec = nil\n\tp.size = nil\n\n\tswitch t1 := typ; t1.Kind() {\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"proto: no coders for %v\\n\", t1)\n\n\t// proto3 scalar types\n\n\tcase reflect.Bool:\n\t\tp.enc = (*Buffer).enc_proto3_bool\n\t\tp.dec = (*Buffer).dec_proto3_bool\n\t\tp.size = size_proto3_bool\n\tcase reflect.Int32:\n\t\tp.enc = (*Buffer).enc_proto3_int32\n\t\tp.dec = (*Buffer).dec_proto3_int32\n\t\tp.size = size_proto3_int32\n\tcase reflect.Uint32:\n\t\tp.enc = (*Buffer).enc_proto3_uint32\n\t\tp.dec = (*Buffer).dec_proto3_int32 // can reuse\n\t\tp.size = size_proto3_uint32\n\tcase reflect.Int64, reflect.Uint64:\n\t\tp.enc = (*Buffer).enc_proto3_int64\n\t\tp.dec = (*Buffer).dec_proto3_int64\n\t\tp.size = size_proto3_int64\n\tcase reflect.Float32:\n\t\tp.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits\n\t\tp.dec = (*Buffer).dec_proto3_int32\n\t\tp.size = size_proto3_uint32\n\tcase reflect.Float64:\n\t\tp.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits\n\t\tp.dec = (*Buffer).dec_proto3_int64\n\t\tp.size = size_proto3_int64\n\tcase reflect.String:\n\t\tp.enc = 
(*Buffer).enc_proto3_string\n\t\tp.dec = (*Buffer).dec_proto3_string\n\t\tp.size = size_proto3_string\n\n\tcase reflect.Ptr:\n\t\tswitch t2 := t1.Elem(); t2.Kind() {\n\t\tdefault:\n\t\t\tfmt.Fprintf(os.Stderr, \"proto: no encoder function for %v -> %v\\n\", t1, t2)\n\t\t\tbreak\n\t\tcase reflect.Bool:\n\t\t\tp.enc = (*Buffer).enc_bool\n\t\t\tp.dec = (*Buffer).dec_bool\n\t\t\tp.size = size_bool\n\t\tcase reflect.Int32:\n\t\t\tp.enc = (*Buffer).enc_int32\n\t\t\tp.dec = (*Buffer).dec_int32\n\t\t\tp.size = size_int32\n\t\tcase reflect.Uint32:\n\t\t\tp.enc = (*Buffer).enc_uint32\n\t\t\tp.dec = (*Buffer).dec_int32 // can reuse\n\t\t\tp.size = size_uint32\n\t\tcase reflect.Int64, reflect.Uint64:\n\t\t\tp.enc = (*Buffer).enc_int64\n\t\t\tp.dec = (*Buffer).dec_int64\n\t\t\tp.size = size_int64\n\t\tcase reflect.Float32:\n\t\t\tp.enc = (*Buffer).enc_uint32 // can just treat them as bits\n\t\t\tp.dec = (*Buffer).dec_int32\n\t\t\tp.size = size_uint32\n\t\tcase reflect.Float64:\n\t\t\tp.enc = (*Buffer).enc_int64 // can just treat them as bits\n\t\t\tp.dec = (*Buffer).dec_int64\n\t\t\tp.size = size_int64\n\t\tcase reflect.String:\n\t\t\tp.enc = (*Buffer).enc_string\n\t\t\tp.dec = (*Buffer).dec_string\n\t\t\tp.size = size_string\n\t\tcase reflect.Struct:\n\t\t\tp.stype = t1.Elem()\n\t\t\tp.isMarshaler = isMarshaler(t1)\n\t\t\tp.isUnmarshaler = isUnmarshaler(t1)\n\t\t\tif p.Wire == \"bytes\" {\n\t\t\t\tp.enc = (*Buffer).enc_struct_message\n\t\t\t\tp.dec = (*Buffer).dec_struct_message\n\t\t\t\tp.size = size_struct_message\n\t\t\t} else {\n\t\t\t\tp.enc = (*Buffer).enc_struct_group\n\t\t\t\tp.dec = (*Buffer).dec_struct_group\n\t\t\t\tp.size = size_struct_group\n\t\t\t}\n\t\t}\n\n\tcase reflect.Slice:\n\t\tswitch t2 := t1.Elem(); t2.Kind() {\n\t\tdefault:\n\t\t\tlogNoSliceEnc(t1, t2)\n\t\t\tbreak\n\t\tcase reflect.Bool:\n\t\t\tif p.Packed {\n\t\t\t\tp.enc = (*Buffer).enc_slice_packed_bool\n\t\t\t\tp.size = size_slice_packed_bool\n\t\t\t} else {\n\t\t\t\tp.enc = 
(*Buffer).enc_slice_bool\n\t\t\t\tp.size = size_slice_bool\n\t\t\t}\n\t\t\tp.dec = (*Buffer).dec_slice_bool\n\t\t\tp.packedDec = (*Buffer).dec_slice_packed_bool\n\t\tcase reflect.Int32:\n\t\t\tif p.Packed {\n\t\t\t\tp.enc = (*Buffer).enc_slice_packed_int32\n\t\t\t\tp.size = size_slice_packed_int32\n\t\t\t} else {\n\t\t\t\tp.enc = (*Buffer).enc_slice_int32\n\t\t\t\tp.size = size_slice_int32\n\t\t\t}\n\t\t\tp.dec = (*Buffer).dec_slice_int32\n\t\t\tp.packedDec = (*Buffer).dec_slice_packed_int32\n\t\tcase reflect.Uint32:\n\t\t\tif p.Packed {\n\t\t\t\tp.enc = (*Buffer).enc_slice_packed_uint32\n\t\t\t\tp.size = size_slice_packed_uint32\n\t\t\t} else {\n\t\t\t\tp.enc = (*Buffer).enc_slice_uint32\n\t\t\t\tp.size = size_slice_uint32\n\t\t\t}\n\t\t\tp.dec = (*Buffer).dec_slice_int32\n\t\t\tp.packedDec = (*Buffer).dec_slice_packed_int32\n\t\tcase reflect.Int64, reflect.Uint64:\n\t\t\tif p.Packed {\n\t\t\t\tp.enc = (*Buffer).enc_slice_packed_int64\n\t\t\t\tp.size = size_slice_packed_int64\n\t\t\t} else {\n\t\t\t\tp.enc = (*Buffer).enc_slice_int64\n\t\t\t\tp.size = size_slice_int64\n\t\t\t}\n\t\t\tp.dec = (*Buffer).dec_slice_int64\n\t\t\tp.packedDec = (*Buffer).dec_slice_packed_int64\n\t\tcase reflect.Uint8:\n\t\t\tp.enc = (*Buffer).enc_slice_byte\n\t\t\tp.dec = (*Buffer).dec_slice_byte\n\t\t\tp.size = size_slice_byte\n\t\t\t// This is a []byte, which is either a bytes field,\n\t\t\t// or the value of a map field. 
In the latter case,\n\t\t\t// we always encode an empty []byte, so we should not\n\t\t\t// use the proto3 enc/size funcs.\n\t\t\t// f == nil iff this is the key/value of a map field.\n\t\t\tif p.proto3 && f != nil {\n\t\t\t\tp.enc = (*Buffer).enc_proto3_slice_byte\n\t\t\t\tp.size = size_proto3_slice_byte\n\t\t\t}\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\tswitch t2.Bits() {\n\t\t\tcase 32:\n\t\t\t\t// can just treat them as bits\n\t\t\t\tif p.Packed {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_packed_uint32\n\t\t\t\t\tp.size = size_slice_packed_uint32\n\t\t\t\t} else {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_uint32\n\t\t\t\t\tp.size = size_slice_uint32\n\t\t\t\t}\n\t\t\t\tp.dec = (*Buffer).dec_slice_int32\n\t\t\t\tp.packedDec = (*Buffer).dec_slice_packed_int32\n\t\t\tcase 64:\n\t\t\t\t// can just treat them as bits\n\t\t\t\tif p.Packed {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_packed_int64\n\t\t\t\t\tp.size = size_slice_packed_int64\n\t\t\t\t} else {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_int64\n\t\t\t\t\tp.size = size_slice_int64\n\t\t\t\t}\n\t\t\t\tp.dec = (*Buffer).dec_slice_int64\n\t\t\t\tp.packedDec = (*Buffer).dec_slice_packed_int64\n\t\t\tdefault:\n\t\t\t\tlogNoSliceEnc(t1, t2)\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase reflect.String:\n\t\t\tp.enc = (*Buffer).enc_slice_string\n\t\t\tp.dec = (*Buffer).dec_slice_string\n\t\t\tp.size = size_slice_string\n\t\tcase reflect.Ptr:\n\t\t\tswitch t3 := t2.Elem(); t3.Kind() {\n\t\t\tdefault:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"proto: no ptr oenc for %T -> %T -> %T\\n\", t1, t2, t3)\n\t\t\t\tbreak\n\t\t\tcase reflect.Struct:\n\t\t\t\tp.stype = t2.Elem()\n\t\t\t\tp.isMarshaler = isMarshaler(t2)\n\t\t\t\tp.isUnmarshaler = isUnmarshaler(t2)\n\t\t\t\tif p.Wire == \"bytes\" {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_struct_message\n\t\t\t\t\tp.dec = (*Buffer).dec_slice_struct_message\n\t\t\t\t\tp.size = size_slice_struct_message\n\t\t\t\t} else {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_struct_group\n\t\t\t\t\tp.dec = 
(*Buffer).dec_slice_struct_group\n\t\t\t\t\tp.size = size_slice_struct_group\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tswitch t2.Elem().Kind() {\n\t\t\tdefault:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"proto: no slice elem oenc for %T -> %T -> %T\\n\", t1, t2, t2.Elem())\n\t\t\t\tbreak\n\t\t\tcase reflect.Uint8:\n\t\t\t\tp.enc = (*Buffer).enc_slice_slice_byte\n\t\t\t\tp.dec = (*Buffer).dec_slice_slice_byte\n\t\t\t\tp.size = size_slice_slice_byte\n\t\t\t}\n\t\t}\n\n\tcase reflect.Map:\n\t\tp.enc = (*Buffer).enc_new_map\n\t\tp.dec = (*Buffer).dec_new_map\n\t\tp.size = size_new_map\n\n\t\tp.mtype = t1\n\t\tp.mkeyprop = &Properties{}\n\t\tp.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), \"Key\", f.Tag.Get(\"protobuf_key\"), nil, lockGetProp)\n\t\tp.mvalprop = &Properties{}\n\t\tvtype := p.mtype.Elem()\n\t\tif vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {\n\t\t\t// The value type is not a message (*T) or bytes ([]byte),\n\t\t\t// so we need encoders for the pointer to this type.\n\t\t\tvtype = reflect.PtrTo(vtype)\n\t\t}\n\t\tp.mvalprop.init(vtype, \"Value\", f.Tag.Get(\"protobuf_val\"), nil, lockGetProp)\n\t}\n\n\t// precalculate tag code\n\twire := p.WireType\n\tif p.Packed {\n\t\twire = WireBytes\n\t}\n\tx := uint32(p.Tag)<<3 | uint32(wire)\n\ti := 0\n\tfor i = 0; x > 127; i++ {\n\t\tp.tagbuf[i] = 0x80 | uint8(x&0x7F)\n\t\tx >>= 7\n\t}\n\tp.tagbuf[i] = uint8(x)\n\tp.tagcode = p.tagbuf[0 : i+1]\n\n\tif p.stype != nil {\n\t\tif lockGetProp {\n\t\t\tp.sprop = GetProperties(p.stype)\n\t\t} else {\n\t\t\tp.sprop = getPropertiesLocked(p.stype)\n\t\t}\n\t}\n}\n\nvar (\n\tmarshalerType   = reflect.TypeOf((*Marshaler)(nil)).Elem()\n\tunmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()\n)\n\n// isMarshaler reports whether type t implements Marshaler.\nfunc isMarshaler(t reflect.Type) bool {\n\t// We're checking for (likely) pointer-receiver methods\n\t// so if t is not a pointer, something is very wrong.\n\t// The calls above only invoke isMarshaler 
on pointer types.\n\tif t.Kind() != reflect.Ptr {\n\t\tpanic(\"proto: misuse of isMarshaler\")\n\t}\n\treturn t.Implements(marshalerType)\n}\n\n// isUnmarshaler reports whether type t implements Unmarshaler.\nfunc isUnmarshaler(t reflect.Type) bool {\n\t// We're checking for (likely) pointer-receiver methods\n\t// so if t is not a pointer, something is very wrong.\n\t// The calls above only invoke isUnmarshaler on pointer types.\n\tif t.Kind() != reflect.Ptr {\n\t\tpanic(\"proto: misuse of isUnmarshaler\")\n\t}\n\treturn t.Implements(unmarshalerType)\n}\n\n// Init populates the properties from a protocol buffer struct tag.\nfunc (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {\n\tp.init(typ, name, tag, f, true)\n}\n\nfunc (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {\n\t// \"bytes,49,opt,def=hello!\"\n\tp.Name = name\n\tp.OrigName = name\n\tif f != nil {\n\t\tp.field = toField(f)\n\t}\n\tif tag == \"\" {\n\t\treturn\n\t}\n\tp.Parse(tag)\n\tp.setEncAndDec(typ, f, lockGetProp)\n}\n\nvar (\n\tpropertiesMu  sync.RWMutex\n\tpropertiesMap = make(map[reflect.Type]*StructProperties)\n)\n\n// GetProperties returns the list of properties for the type represented by t.\n// t must represent a generated struct type of a protocol message.\nfunc GetProperties(t reflect.Type) *StructProperties {\n\tif t.Kind() != reflect.Struct {\n\t\tpanic(\"proto: type must have kind struct\")\n\t}\n\n\t// Most calls to GetProperties in a long-running program will be\n\t// retrieving details for types we have seen before.\n\tpropertiesMu.RLock()\n\tsprop, ok := propertiesMap[t]\n\tpropertiesMu.RUnlock()\n\tif ok {\n\t\tif collectStats {\n\t\t\tstats.Chit++\n\t\t}\n\t\treturn sprop\n\t}\n\n\tpropertiesMu.Lock()\n\tsprop = getPropertiesLocked(t)\n\tpropertiesMu.Unlock()\n\treturn sprop\n}\n\n// getPropertiesLocked requires that propertiesMu is held.\nfunc getPropertiesLocked(t reflect.Type) *StructProperties 
{\n\tif prop, ok := propertiesMap[t]; ok {\n\t\tif collectStats {\n\t\t\tstats.Chit++\n\t\t}\n\t\treturn prop\n\t}\n\tif collectStats {\n\t\tstats.Cmiss++\n\t}\n\n\tprop := new(StructProperties)\n\t// in case of recursive protos, fill this in now.\n\tpropertiesMap[t] = prop\n\n\t// build properties\n\tprop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)\n\tprop.unrecField = invalidField\n\tprop.Prop = make([]*Properties, t.NumField())\n\tprop.order = make([]int, t.NumField())\n\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tp := new(Properties)\n\t\tname := f.Name\n\t\tp.init(f.Type, name, f.Tag.Get(\"protobuf\"), &f, false)\n\n\t\tif f.Name == \"XXX_extensions\" { // special case\n\t\t\tp.enc = (*Buffer).enc_map\n\t\t\tp.dec = nil // not needed\n\t\t\tp.size = size_map\n\t\t}\n\t\tif f.Name == \"XXX_unrecognized\" { // special case\n\t\t\tprop.unrecField = toField(&f)\n\t\t}\n\t\tprop.Prop[i] = p\n\t\tprop.order[i] = i\n\t\tif debug {\n\t\t\tprint(i, \" \", f.Name, \" \", t.String(), \" \")\n\t\t\tif p.Tag > 0 {\n\t\t\t\tprint(p.String())\n\t\t\t}\n\t\t\tprint(\"\\n\")\n\t\t}\n\t\tif p.enc == nil && !strings.HasPrefix(f.Name, \"XXX_\") {\n\t\t\tfmt.Fprintln(os.Stderr, \"proto: no encoder for\", f.Name, f.Type.String(), \"[GetProperties]\")\n\t\t}\n\t}\n\n\t// Re-order prop.order.\n\tsort.Sort(prop)\n\n\t// build required counts\n\t// build tags\n\treqCount := 0\n\tprop.decoderOrigNames = make(map[string]int)\n\tfor i, p := range prop.Prop {\n\t\tif strings.HasPrefix(p.Name, \"XXX_\") {\n\t\t\t// Internal fields should not appear in tags/origNames maps.\n\t\t\t// They are handled specially when encoding and decoding.\n\t\t\tcontinue\n\t\t}\n\t\tif p.Required {\n\t\t\treqCount++\n\t\t}\n\t\tprop.decoderTags.put(p.Tag, i)\n\t\tprop.decoderOrigNames[p.OrigName] = i\n\t}\n\tprop.reqCount = reqCount\n\n\treturn prop\n}\n\n// Return the Properties object for the x[0]'th field of the structure.\nfunc propByIndex(t reflect.Type, x []int) 
*Properties {\n\tif len(x) != 1 {\n\t\tfmt.Fprintf(os.Stderr, \"proto: field index dimension %d (not 1) for type %s\\n\", len(x), t)\n\t\treturn nil\n\t}\n\tprop := GetProperties(t)\n\treturn prop.Prop[x[0]]\n}\n\n// Get the address and type of a pointer to a struct from an interface.\nfunc getbase(pb Message) (t reflect.Type, b structPointer, err error) {\n\tif pb == nil {\n\t\terr = ErrNil\n\t\treturn\n\t}\n\t// get the reflect type of the pointer to the struct.\n\tt = reflect.TypeOf(pb)\n\t// get the address of the struct.\n\tvalue := reflect.ValueOf(pb)\n\tb = toStructPointer(value)\n\treturn\n}\n\n// A global registry of enum types.\n// The generated code will register the generated maps by calling RegisterEnum.\n\nvar enumValueMaps = make(map[string]map[string]int32)\n\n// RegisterEnum is called from the generated code to install the enum descriptor\n// maps into the global table to aid parsing text format protocol buffers.\nfunc RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {\n\tif _, ok := enumValueMaps[typeName]; ok {\n\t\tpanic(\"proto: duplicate enum registered: \" + typeName)\n\t}\n\tenumValueMaps[typeName] = valueMap\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: proto3_proto/proto3.proto\n// DO NOT EDIT!\n\n/*\nPackage proto3_proto is a generated protocol buffer package.\n\nIt is generated from these files:\n\tproto3_proto/proto3.proto\n\nIt has these top-level messages:\n\tMessage\n\tNested\n\tMessageWithMap\n*/\npackage proto3_proto\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport testdata \"github.com/golang/protobuf/proto/testdata\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\n\ntype Message_Humour int32\n\nconst (\n\tMessage_UNKNOWN     Message_Humour = 0\n\tMessage_PUNS        Message_Humour = 1\n\tMessage_SLAPSTICK   Message_Humour = 2\n\tMessage_BILL_BAILEY Message_Humour = 3\n)\n\nvar Message_Humour_name = map[int32]string{\n\t0: \"UNKNOWN\",\n\t1: \"PUNS\",\n\t2: \"SLAPSTICK\",\n\t3: \"BILL_BAILEY\",\n}\nvar Message_Humour_value = map[string]int32{\n\t\"UNKNOWN\":     0,\n\t\"PUNS\":        1,\n\t\"SLAPSTICK\":   2,\n\t\"BILL_BAILEY\": 3,\n}\n\nfunc (x Message_Humour) String() string {\n\treturn proto.EnumName(Message_Humour_name, int32(x))\n}\n\ntype Message struct {\n\tName         string                           `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\tHilarity     Message_Humour                   `protobuf:\"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour\" json:\"hilarity,omitempty\"`\n\tHeightInCm   uint32                           `protobuf:\"varint,3,opt,name=height_in_cm\" json:\"height_in_cm,omitempty\"`\n\tData         []byte                           `protobuf:\"bytes,4,opt,name=data,proto3\" json:\"data,omitempty\"`\n\tResultCount  int64                            `protobuf:\"varint,7,opt,name=result_count\" json:\"result_count,omitempty\"`\n\tTrueScotsman bool                             `protobuf:\"varint,8,opt,name=true_scotsman\" json:\"true_scotsman,omitempty\"`\n\tScore        float32                          
`protobuf:\"fixed32,9,opt,name=score\" json:\"score,omitempty\"`\n\tKey          []uint64                         `protobuf:\"varint,5,rep,name=key\" json:\"key,omitempty\"`\n\tNested       *Nested                          `protobuf:\"bytes,6,opt,name=nested\" json:\"nested,omitempty\"`\n\tTerrain      map[string]*Nested               `protobuf:\"bytes,10,rep,name=terrain\" json:\"terrain,omitempty\" protobuf_key:\"bytes,1,opt,name=key\" protobuf_val:\"bytes,2,opt,name=value\"`\n\tProto2Field  *testdata.SubDefaults            `protobuf:\"bytes,11,opt,name=proto2_field\" json:\"proto2_field,omitempty\"`\n\tProto2Value  map[string]*testdata.SubDefaults `protobuf:\"bytes,13,rep,name=proto2_value\" json:\"proto2_value,omitempty\" protobuf_key:\"bytes,1,opt,name=key\" protobuf_val:\"bytes,2,opt,name=value\"`\n}\n\nfunc (m *Message) Reset()         { *m = Message{} }\nfunc (m *Message) String() string { return proto.CompactTextString(m) }\nfunc (*Message) ProtoMessage()    {}\n\nfunc (m *Message) GetNested() *Nested {\n\tif m != nil {\n\t\treturn m.Nested\n\t}\n\treturn nil\n}\n\nfunc (m *Message) GetTerrain() map[string]*Nested {\n\tif m != nil {\n\t\treturn m.Terrain\n\t}\n\treturn nil\n}\n\nfunc (m *Message) GetProto2Field() *testdata.SubDefaults {\n\tif m != nil {\n\t\treturn m.Proto2Field\n\t}\n\treturn nil\n}\n\nfunc (m *Message) GetProto2Value() map[string]*testdata.SubDefaults {\n\tif m != nil {\n\t\treturn m.Proto2Value\n\t}\n\treturn nil\n}\n\ntype Nested struct {\n\tBunny string `protobuf:\"bytes,1,opt,name=bunny\" json:\"bunny,omitempty\"`\n}\n\nfunc (m *Nested) Reset()         { *m = Nested{} }\nfunc (m *Nested) String() string { return proto.CompactTextString(m) }\nfunc (*Nested) ProtoMessage()    {}\n\ntype MessageWithMap struct {\n\tByteMapping map[bool][]byte `protobuf:\"bytes,1,rep,name=byte_mapping\" json:\"byte_mapping,omitempty\" protobuf_key:\"varint,1,opt,name=key\" protobuf_val:\"bytes,2,opt,name=value,proto3\"`\n}\n\nfunc (m *MessageWithMap) 
Reset()         { *m = MessageWithMap{} }\nfunc (m *MessageWithMap) String() string { return proto.CompactTextString(m) }\nfunc (*MessageWithMap) ProtoMessage()    {}\n\nfunc (m *MessageWithMap) GetByteMapping() map[bool][]byte {\n\tif m != nil {\n\t\treturn m.ByteMapping\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tproto.RegisterEnum(\"proto3_proto.Message_Humour\", Message_Humour_name, Message_Humour_value)\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2014 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\nimport \"testdata/test.proto\";\n\npackage proto3_proto;\n\nmessage Message {\n  enum Humour {\n    UNKNOWN = 0;\n    PUNS = 1;\n    SLAPSTICK = 2;\n    BILL_BAILEY = 3;\n  }\n\n  string name = 1;\n  Humour hilarity = 2;\n  uint32 height_in_cm = 3;\n  bytes data = 4;\n  int64 result_count = 7;\n  bool true_scotsman = 8;\n  float score = 9;\n\n  repeated uint64 key = 5;\n  Nested nested = 6;\n\n  map<string, Nested> terrain = 10;\n  testdata.SubDefaults proto2_field = 11;\n  map<string, testdata.SubDefaults> proto2_value = 13;\n}\n\nmessage Nested {\n  string bunny = 1;\n}\n\nmessage MessageWithMap {\n  map<bool, bytes> byte_mapping = 1;\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/text.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n// Functions for writing the text protocol buffer format.\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"os\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strings\"\n)\n\nvar (\n\tnewline         = []byte(\"\\n\")\n\tspaces          = []byte(\"                                        \")\n\tgtNewline       = []byte(\">\\n\")\n\tendBraceNewline = []byte(\"}\\n\")\n\tbackslashN      = []byte{'\\\\', 'n'}\n\tbackslashR      = []byte{'\\\\', 'r'}\n\tbackslashT      = []byte{'\\\\', 't'}\n\tbackslashDQ     = []byte{'\\\\', '\"'}\n\tbackslashBS     = []byte{'\\\\', '\\\\'}\n\tposInf          = []byte(\"inf\")\n\tnegInf          = []byte(\"-inf\")\n\tnan             = []byte(\"nan\")\n)\n\ntype writer interface {\n\tio.Writer\n\tWriteByte(byte) error\n}\n\n// textWriter is an io.Writer that tracks its indentation level.\ntype textWriter struct {\n\tind      int\n\tcomplete bool // if the current position is a complete line\n\tcompact  bool // whether to write out as a one-liner\n\tw        writer\n}\n\nfunc (w *textWriter) WriteString(s string) (n int, err error) {\n\tif !strings.Contains(s, \"\\n\") {\n\t\tif !w.compact && w.complete {\n\t\t\tw.writeIndent()\n\t\t}\n\t\tw.complete = false\n\t\treturn io.WriteString(w.w, s)\n\t}\n\t// WriteString is typically called without newlines, so this\n\t// codepath and its copy are rare.  
We copy to avoid\n\t// duplicating all of Write's logic here.\n\treturn w.Write([]byte(s))\n}\n\nfunc (w *textWriter) Write(p []byte) (n int, err error) {\n\tnewlines := bytes.Count(p, newline)\n\tif newlines == 0 {\n\t\tif !w.compact && w.complete {\n\t\t\tw.writeIndent()\n\t\t}\n\t\tn, err = w.w.Write(p)\n\t\tw.complete = false\n\t\treturn n, err\n\t}\n\n\tfrags := bytes.SplitN(p, newline, newlines+1)\n\tif w.compact {\n\t\tfor i, frag := range frags {\n\t\t\tif i > 0 {\n\t\t\t\tif err := w.w.WriteByte(' '); err != nil {\n\t\t\t\t\treturn n, err\n\t\t\t\t}\n\t\t\t\tn++\n\t\t\t}\n\t\t\tnn, err := w.w.Write(frag)\n\t\t\tn += nn\n\t\t\tif err != nil {\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t}\n\t\treturn n, nil\n\t}\n\n\tfor i, frag := range frags {\n\t\tif w.complete {\n\t\t\tw.writeIndent()\n\t\t}\n\t\tnn, err := w.w.Write(frag)\n\t\tn += nn\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tif i+1 < len(frags) {\n\t\t\tif err := w.w.WriteByte('\\n'); err != nil {\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t\tn++\n\t\t}\n\t}\n\tw.complete = len(frags[len(frags)-1]) == 0\n\treturn n, nil\n}\n\nfunc (w *textWriter) WriteByte(c byte) error {\n\tif w.compact && c == '\\n' {\n\t\tc = ' '\n\t}\n\tif !w.compact && w.complete {\n\t\tw.writeIndent()\n\t}\n\terr := w.w.WriteByte(c)\n\tw.complete = c == '\\n'\n\treturn err\n}\n\nfunc (w *textWriter) indent() { w.ind++ }\n\nfunc (w *textWriter) unindent() {\n\tif w.ind == 0 {\n\t\tlog.Printf(\"proto: textWriter unindented too far\")\n\t\treturn\n\t}\n\tw.ind--\n}\n\nfunc writeName(w *textWriter, props *Properties) error {\n\tif _, err := w.WriteString(props.OrigName); err != nil {\n\t\treturn err\n\t}\n\tif props.Wire != \"group\" {\n\t\treturn w.WriteByte(':')\n\t}\n\treturn nil\n}\n\nvar (\n\tmessageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem()\n)\n\n// raw is the interface satisfied by RawMessage.\ntype raw interface {\n\tBytes() []byte\n}\n\nfunc writeStruct(w *textWriter, sv reflect.Value) error {\n\tif sv.Type() == 
messageSetType {\n\t\treturn writeMessageSet(w, sv.Addr().Interface().(*MessageSet))\n\t}\n\n\tst := sv.Type()\n\tsprops := GetProperties(st)\n\tfor i := 0; i < sv.NumField(); i++ {\n\t\tfv := sv.Field(i)\n\t\tprops := sprops.Prop[i]\n\t\tname := st.Field(i).Name\n\n\t\tif strings.HasPrefix(name, \"XXX_\") {\n\t\t\t// There are two XXX_ fields:\n\t\t\t//   XXX_unrecognized []byte\n\t\t\t//   XXX_extensions   map[int32]proto.Extension\n\t\t\t// The first is handled here;\n\t\t\t// the second is handled at the bottom of this function.\n\t\t\tif name == \"XXX_unrecognized\" && !fv.IsNil() {\n\t\t\t\tif err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif fv.Kind() == reflect.Ptr && fv.IsNil() {\n\t\t\t// Field not filled in. This could be an optional field or\n\t\t\t// a required field that wasn't filled in. Either way, there\n\t\t\t// isn't anything we can show for it.\n\t\t\tcontinue\n\t\t}\n\t\tif fv.Kind() == reflect.Slice && fv.IsNil() {\n\t\t\t// Repeated field that is empty, or a bytes field that is unused.\n\t\t\tcontinue\n\t\t}\n\n\t\tif props.Repeated && fv.Kind() == reflect.Slice {\n\t\t\t// Repeated field.\n\t\t\tfor j := 0; j < fv.Len(); j++ {\n\t\t\t\tif err := writeName(w, props); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !w.compact {\n\t\t\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tv := fv.Index(j)\n\t\t\t\tif v.Kind() == reflect.Ptr && v.IsNil() {\n\t\t\t\t\t// A nil message in a repeated field is not valid,\n\t\t\t\t\t// but we can handle that more gracefully than panicking.\n\t\t\t\t\tif _, err := w.Write([]byte(\"<nil>\\n\")); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif err := writeAny(w, v, props); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\t\treturn 
err\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif fv.Kind() == reflect.Map {\n\t\t\t// Map fields are rendered as a repeated struct with key/value fields.\n\t\t\tkeys := fv.MapKeys() // TODO: should we sort these for deterministic output?\n\t\t\tsort.Sort(mapKeys(keys))\n\t\t\tfor _, key := range keys {\n\t\t\t\tval := fv.MapIndex(key)\n\t\t\t\tif err := writeName(w, props); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !w.compact {\n\t\t\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// open struct\n\t\t\t\tif err := w.WriteByte('<'); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !w.compact {\n\t\t\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tw.indent()\n\t\t\t\t// key\n\t\t\t\tif _, err := w.WriteString(\"key:\"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !w.compact {\n\t\t\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif err := writeAny(w, key, props.mkeyprop); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t// nil values aren't legal, but we can avoid panicking because of them.\n\t\t\t\tif val.Kind() != reflect.Ptr || !val.IsNil() {\n\t\t\t\t\t// value\n\t\t\t\t\tif _, err := w.WriteString(\"value:\"); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif !w.compact {\n\t\t\t\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif err := writeAny(w, val, props.mvalprop); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// close struct\n\t\t\t\tw.unindent()\n\t\t\t\tif err := w.WriteByte('>'); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.WriteByte('\\n'); err 
!= nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {\n\t\t\t// empty bytes field\n\t\t\tcontinue\n\t\t}\n\t\tif fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {\n\t\t\t// proto3 non-repeated scalar field; skip if zero value\n\t\t\tswitch fv.Kind() {\n\t\t\tcase reflect.Bool:\n\t\t\t\tif !fv.Bool() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase reflect.Int32, reflect.Int64:\n\t\t\t\tif fv.Int() == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase reflect.Uint32, reflect.Uint64:\n\t\t\t\tif fv.Uint() == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase reflect.Float32, reflect.Float64:\n\t\t\t\tif fv.Float() == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase reflect.String:\n\t\t\t\tif fv.String() == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif err := writeName(w, props); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !w.compact {\n\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif b, ok := fv.Interface().(raw); ok {\n\t\t\tif err := writeRaw(w, b.Bytes()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// Enums have a String method, so writeAny will work fine.\n\t\tif err := writeAny(w, fv, props); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Extensions (the XXX_extensions field).\n\tpv := sv.Addr()\n\tif pv.Type().Implements(extendableProtoType) {\n\t\tif err := writeExtensions(w, pv); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// writeRaw writes an uninterpreted raw message.\nfunc writeRaw(w *textWriter, b []byte) error {\n\tif err := w.WriteByte('<'); err != nil {\n\t\treturn err\n\t}\n\tif !w.compact {\n\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tw.indent()\n\tif err := writeUnknownStruct(w, b); err != nil {\n\t\treturn err\n\t}\n\tw.unindent()\n\tif err 
:= w.WriteByte('>'); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// writeAny writes an arbitrary field.\nfunc writeAny(w *textWriter, v reflect.Value, props *Properties) error {\n\tv = reflect.Indirect(v)\n\n\t// Floats have special cases.\n\tif v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {\n\t\tx := v.Float()\n\t\tvar b []byte\n\t\tswitch {\n\t\tcase math.IsInf(x, 1):\n\t\t\tb = posInf\n\t\tcase math.IsInf(x, -1):\n\t\t\tb = negInf\n\t\tcase math.IsNaN(x):\n\t\t\tb = nan\n\t\t}\n\t\tif b != nil {\n\t\t\t_, err := w.Write(b)\n\t\t\treturn err\n\t\t}\n\t\t// Other values are handled below.\n\t}\n\n\t// We don't attempt to serialise every possible value type; only those\n\t// that can occur in protocol buffers.\n\tswitch v.Kind() {\n\tcase reflect.Slice:\n\t\t// Should only be a []byte; repeated fields are handled in writeStruct.\n\t\tif err := writeString(w, string(v.Interface().([]byte))); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase reflect.String:\n\t\tif err := writeString(w, v.String()); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase reflect.Struct:\n\t\t// Required/optional group/message.\n\t\tvar bra, ket byte = '<', '>'\n\t\tif props != nil && props.Wire == \"group\" {\n\t\t\tbra, ket = '{', '}'\n\t\t}\n\t\tif err := w.WriteByte(bra); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !w.compact {\n\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tw.indent()\n\t\tif tm, ok := v.Interface().(encoding.TextMarshaler); ok {\n\t\t\ttext, err := tm.MarshalText()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err = w.Write(text); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if err := writeStruct(w, v); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tw.unindent()\n\t\tif err := w.WriteByte(ket); err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\t_, err := fmt.Fprint(w, v.Interface())\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// equivalent to C's isprint.\nfunc isprint(c byte) bool 
{\n\treturn c >= 0x20 && c < 0x7f\n}\n\n// writeString writes a string in the protocol buffer text format.\n// It is similar to strconv.Quote except we don't use Go escape sequences,\n// we treat the string as a byte sequence, and we use octal escapes.\n// These differences are to maintain interoperability with the other\n// languages' implementations of the text format.\nfunc writeString(w *textWriter, s string) error {\n\t// use WriteByte here to get any needed indent\n\tif err := w.WriteByte('\"'); err != nil {\n\t\treturn err\n\t}\n\t// Loop over the bytes, not the runes.\n\tfor i := 0; i < len(s); i++ {\n\t\tvar err error\n\t\t// Divergence from C++: we don't escape apostrophes.\n\t\t// There's no need to escape them, and the C++ parser\n\t\t// copes with a naked apostrophe.\n\t\tswitch c := s[i]; c {\n\t\tcase '\\n':\n\t\t\t_, err = w.w.Write(backslashN)\n\t\tcase '\\r':\n\t\t\t_, err = w.w.Write(backslashR)\n\t\tcase '\\t':\n\t\t\t_, err = w.w.Write(backslashT)\n\t\tcase '\"':\n\t\t\t_, err = w.w.Write(backslashDQ)\n\t\tcase '\\\\':\n\t\t\t_, err = w.w.Write(backslashBS)\n\t\tdefault:\n\t\t\tif isprint(c) {\n\t\t\t\terr = w.w.WriteByte(c)\n\t\t\t} else {\n\t\t\t\t_, err = fmt.Fprintf(w.w, \"\\\\%03o\", c)\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn w.WriteByte('\"')\n}\n\nfunc writeMessageSet(w *textWriter, ms *MessageSet) error {\n\tfor _, item := range ms.Item {\n\t\tid := *item.TypeId\n\t\tif msd, ok := messageSetMap[id]; ok {\n\t\t\t// Known message set type.\n\t\t\tif _, err := fmt.Fprintf(w, \"[%s]: <\\n\", msd.name); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tw.indent()\n\n\t\t\tpb := reflect.New(msd.t.Elem())\n\t\t\tif err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil {\n\t\t\t\tif _, err := fmt.Fprintf(w, \"/* bad message: %v */\\n\", err); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err := writeStruct(w, pb.Elem()); err != nil {\n\t\t\t\t\treturn 
err\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// Unknown type.\n\t\t\tif _, err := fmt.Fprintf(w, \"[%d]: <\\n\", id); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tw.indent()\n\t\t\tif err := writeUnknownStruct(w, item.Message); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tw.unindent()\n\t\tif _, err := w.Write(gtNewline); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc writeUnknownStruct(w *textWriter, data []byte) (err error) {\n\tif !w.compact {\n\t\tif _, err := fmt.Fprintf(w, \"/* %d unknown bytes */\\n\", len(data)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tb := NewBuffer(data)\n\tfor b.index < len(b.buf) {\n\t\tx, err := b.DecodeVarint()\n\t\tif err != nil {\n\t\t\t_, err := fmt.Fprintf(w, \"/* %v */\\n\", err)\n\t\t\treturn err\n\t\t}\n\t\twire, tag := x&7, x>>3\n\t\tif wire == WireEndGroup {\n\t\t\tw.unindent()\n\t\t\tif _, err := w.Write(endBraceNewline); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif _, err := fmt.Fprint(w, tag); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif wire != WireStartGroup {\n\t\t\tif err := w.WriteByte(':'); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif !w.compact || wire == WireStartGroup {\n\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tswitch wire {\n\t\tcase WireBytes:\n\t\t\tbuf, e := b.DecodeRawBytes(false)\n\t\t\tif e == nil {\n\t\t\t\t_, err = fmt.Fprintf(w, \"%q\", buf)\n\t\t\t} else {\n\t\t\t\t_, err = fmt.Fprintf(w, \"/* %v */\", e)\n\t\t\t}\n\t\tcase WireFixed32:\n\t\t\tx, err = b.DecodeFixed32()\n\t\t\terr = writeUnknownInt(w, x, err)\n\t\tcase WireFixed64:\n\t\t\tx, err = b.DecodeFixed64()\n\t\t\terr = writeUnknownInt(w, x, err)\n\t\tcase WireStartGroup:\n\t\t\terr = w.WriteByte('{')\n\t\t\tw.indent()\n\t\tcase WireVarint:\n\t\t\tx, err = b.DecodeVarint()\n\t\t\terr = writeUnknownInt(w, x, err)\n\t\tdefault:\n\t\t\t_, err = fmt.Fprintf(w, \"/* unknown wire type %d */\", wire)\n\t\t}\n\t\tif err != nil 
{\n\t\t\treturn err\n\t\t}\n\t\tif err = w.WriteByte('\\n'); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc writeUnknownInt(w *textWriter, x uint64, err error) error {\n\tif err == nil {\n\t\t_, err = fmt.Fprint(w, x)\n\t} else {\n\t\t_, err = fmt.Fprintf(w, \"/* %v */\", err)\n\t}\n\treturn err\n}\n\ntype int32Slice []int32\n\nfunc (s int32Slice) Len() int           { return len(s) }\nfunc (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }\nfunc (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }\n\n// writeExtensions writes all the extensions in pv.\n// pv is assumed to be a pointer to a protocol message struct that is extendable.\nfunc writeExtensions(w *textWriter, pv reflect.Value) error {\n\temap := extensionMaps[pv.Type().Elem()]\n\tep := pv.Interface().(extendableProto)\n\n\t// Order the extensions by ID.\n\t// This isn't strictly necessary, but it will give us\n\t// canonical output, which will also make testing easier.\n\tm := ep.ExtensionMap()\n\tids := make([]int32, 0, len(m))\n\tfor id := range m {\n\t\tids = append(ids, id)\n\t}\n\tsort.Sort(int32Slice(ids))\n\n\tfor _, extNum := range ids {\n\t\text := m[extNum]\n\t\tvar desc *ExtensionDesc\n\t\tif emap != nil {\n\t\t\tdesc = emap[extNum]\n\t\t}\n\t\tif desc == nil {\n\t\t\t// Unknown extension.\n\t\t\tif err := writeUnknownStruct(w, ext.enc); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tpb, err := GetExtension(ep, desc)\n\t\tif err != nil {\n\t\t\tif _, err := fmt.Fprintln(os.Stderr, \"proto: failed getting extension: \", err); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// Repeated extensions will appear as a slice.\n\t\tif !desc.repeated() {\n\t\t\tif err := writeExtension(w, desc.Name, pb); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tv := reflect.ValueOf(pb)\n\t\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\t\tif err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil 
{\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc writeExtension(w *textWriter, name string, pb interface{}) error {\n\tif _, err := fmt.Fprintf(w, \"[%s]:\", name); err != nil {\n\t\treturn err\n\t}\n\tif !w.compact {\n\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {\n\t\treturn err\n\t}\n\tif err := w.WriteByte('\\n'); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (w *textWriter) writeIndent() {\n\tif !w.complete {\n\t\treturn\n\t}\n\tremain := w.ind * 2\n\tfor remain > 0 {\n\t\tn := remain\n\t\tif n > len(spaces) {\n\t\t\tn = len(spaces)\n\t\t}\n\t\tw.w.Write(spaces[:n])\n\t\tremain -= n\n\t}\n\tw.complete = false\n}\n\nfunc marshalText(w io.Writer, pb Message, compact bool) error {\n\tval := reflect.ValueOf(pb)\n\tif pb == nil || val.IsNil() {\n\t\tw.Write([]byte(\"<nil>\"))\n\t\treturn nil\n\t}\n\tvar bw *bufio.Writer\n\tww, ok := w.(writer)\n\tif !ok {\n\t\tbw = bufio.NewWriter(w)\n\t\tww = bw\n\t}\n\taw := &textWriter{\n\t\tw:        ww,\n\t\tcomplete: true,\n\t\tcompact:  compact,\n\t}\n\n\tif tm, ok := pb.(encoding.TextMarshaler); ok {\n\t\ttext, err := tm.MarshalText()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err = aw.Write(text); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif bw != nil {\n\t\t\treturn bw.Flush()\n\t\t}\n\t\treturn nil\n\t}\n\t// Dereference the received pointer so we don't have outer < and >.\n\tv := reflect.Indirect(val)\n\tif err := writeStruct(aw, v); err != nil {\n\t\treturn err\n\t}\n\tif bw != nil {\n\t\treturn bw.Flush()\n\t}\n\treturn nil\n}\n\n// MarshalText writes a given protocol buffer in text format.\n// The only errors returned are from w.\nfunc MarshalText(w io.Writer, pb Message) error {\n\treturn marshalText(w, pb, false)\n}\n\n// MarshalTextString is the same as MarshalText, but returns the string directly.\nfunc MarshalTextString(pb Message) string {\n\tvar buf 
bytes.Buffer\n\tmarshalText(&buf, pb, false)\n\treturn buf.String()\n}\n\n// CompactText writes a given protocol buffer in compact text format (one line).\nfunc CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }\n\n// CompactTextString is the same as CompactText, but returns the string directly.\nfunc CompactTextString(pb Message) string {\n\tvar buf bytes.Buffer\n\tmarshalText(&buf, pb, true)\n\treturn buf.String()\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/text_parser.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n// Functions for parsing the Text protocol buffer format.\n// TODO: message sets.\n\nimport (\n\t\"encoding\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode/utf8\"\n)\n\ntype ParseError struct {\n\tMessage string\n\tLine    int // 1-based line number\n\tOffset  int // 0-based byte offset from start of input\n}\n\nfunc (p *ParseError) Error() string {\n\tif p.Line == 1 {\n\t\t// show offset only for first line\n\t\treturn fmt.Sprintf(\"line 1.%d: %v\", p.Offset, p.Message)\n\t}\n\treturn fmt.Sprintf(\"line %d: %v\", p.Line, p.Message)\n}\n\ntype token struct {\n\tvalue    string\n\terr      *ParseError\n\tline     int    // line number\n\toffset   int    // byte number from start of input, not start of line\n\tunquoted string // the unquoted version of value, if it was a quoted string\n}\n\nfunc (t *token) String() string {\n\tif t.err == nil {\n\t\treturn fmt.Sprintf(\"%q (line=%d, offset=%d)\", t.value, t.line, t.offset)\n\t}\n\treturn fmt.Sprintf(\"parse error: %v\", t.err)\n}\n\ntype textParser struct {\n\ts            string // remaining input\n\tdone         bool   // whether the parsing is finished (success or error)\n\tbacked       bool   // whether back() was called\n\toffset, line int\n\tcur          token\n}\n\nfunc newTextParser(s string) *textParser {\n\tp := new(textParser)\n\tp.s = s\n\tp.line = 1\n\tp.cur.line = 1\n\treturn p\n}\n\nfunc (p *textParser) 
errorf(format string, a ...interface{}) *ParseError {\n\tpe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}\n\tp.cur.err = pe\n\tp.done = true\n\treturn pe\n}\n\n// Numbers and identifiers are matched by [-+._A-Za-z0-9]\nfunc isIdentOrNumberChar(c byte) bool {\n\tswitch {\n\tcase 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':\n\t\treturn true\n\tcase '0' <= c && c <= '9':\n\t\treturn true\n\t}\n\tswitch c {\n\tcase '-', '+', '.', '_':\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc isWhitespace(c byte) bool {\n\tswitch c {\n\tcase ' ', '\\t', '\\n', '\\r':\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (p *textParser) skipWhitespace() {\n\ti := 0\n\tfor i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {\n\t\tif p.s[i] == '#' {\n\t\t\t// comment; skip to end of line or input\n\t\t\tfor i < len(p.s) && p.s[i] != '\\n' {\n\t\t\t\ti++\n\t\t\t}\n\t\t\tif i == len(p.s) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif p.s[i] == '\\n' {\n\t\t\tp.line++\n\t\t}\n\t\ti++\n\t}\n\tp.offset += i\n\tp.s = p.s[i:len(p.s)]\n\tif len(p.s) == 0 {\n\t\tp.done = true\n\t}\n}\n\nfunc (p *textParser) advance() {\n\t// Skip whitespace\n\tp.skipWhitespace()\n\tif p.done {\n\t\treturn\n\t}\n\n\t// Start of non-whitespace\n\tp.cur.err = nil\n\tp.cur.offset, p.cur.line = p.offset, p.line\n\tp.cur.unquoted = \"\"\n\tswitch p.s[0] {\n\tcase '<', '>', '{', '}', ':', '[', ']', ';', ',':\n\t\t// Single symbol\n\t\tp.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]\n\tcase '\"', '\\'':\n\t\t// Quoted string\n\t\ti := 1\n\t\tfor i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\\n' {\n\t\t\tif p.s[i] == '\\\\' && i+1 < len(p.s) {\n\t\t\t\t// skip escaped char\n\t\t\t\ti++\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\tif i >= len(p.s) || p.s[i] != p.s[0] {\n\t\t\tp.errorf(\"unmatched quote\")\n\t\t\treturn\n\t\t}\n\t\tunq, err := unquoteC(p.s[1:i], rune(p.s[0]))\n\t\tif err != nil {\n\t\t\tp.errorf(\"invalid quoted string %v\", p.s[0:i+1])\n\t\t\treturn\n\t\t}\n\t\tp.cur.value, p.s = p.s[0:i+1], 
p.s[i+1:len(p.s)]\n\t\tp.cur.unquoted = unq\n\tdefault:\n\t\ti := 0\n\t\tfor i < len(p.s) && isIdentOrNumberChar(p.s[i]) {\n\t\t\ti++\n\t\t}\n\t\tif i == 0 {\n\t\t\tp.errorf(\"unexpected byte %#x\", p.s[0])\n\t\t\treturn\n\t\t}\n\t\tp.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]\n\t}\n\tp.offset += len(p.cur.value)\n}\n\nvar (\n\terrBadUTF8 = errors.New(\"proto: bad UTF-8\")\n\terrBadHex  = errors.New(\"proto: bad hexadecimal\")\n)\n\nfunc unquoteC(s string, quote rune) (string, error) {\n\t// This is based on C++'s tokenizer.cc.\n\t// Despite its name, this is *not* parsing C syntax.\n\t// For instance, \"\\0\" is an invalid quoted string.\n\n\t// Avoid allocation in trivial cases.\n\tsimple := true\n\tfor _, r := range s {\n\t\tif r == '\\\\' || r == quote {\n\t\t\tsimple = false\n\t\t\tbreak\n\t\t}\n\t}\n\tif simple {\n\t\treturn s, nil\n\t}\n\n\tbuf := make([]byte, 0, 3*len(s)/2)\n\tfor len(s) > 0 {\n\t\tr, n := utf8.DecodeRuneInString(s)\n\t\tif r == utf8.RuneError && n == 1 {\n\t\t\treturn \"\", errBadUTF8\n\t\t}\n\t\ts = s[n:]\n\t\tif r != '\\\\' {\n\t\t\tif r < utf8.RuneSelf {\n\t\t\t\tbuf = append(buf, byte(r))\n\t\t\t} else {\n\t\t\t\tbuf = append(buf, string(r)...)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tch, tail, err := unescape(s)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tbuf = append(buf, ch...)\n\t\ts = tail\n\t}\n\treturn string(buf), nil\n}\n\nfunc unescape(s string) (ch string, tail string, err error) {\n\tr, n := utf8.DecodeRuneInString(s)\n\tif r == utf8.RuneError && n == 1 {\n\t\treturn \"\", \"\", errBadUTF8\n\t}\n\ts = s[n:]\n\tswitch r {\n\tcase 'a':\n\t\treturn \"\\a\", s, nil\n\tcase 'b':\n\t\treturn \"\\b\", s, nil\n\tcase 'f':\n\t\treturn \"\\f\", s, nil\n\tcase 'n':\n\t\treturn \"\\n\", s, nil\n\tcase 'r':\n\t\treturn \"\\r\", s, nil\n\tcase 't':\n\t\treturn \"\\t\", s, nil\n\tcase 'v':\n\t\treturn \"\\v\", s, nil\n\tcase '?':\n\t\treturn \"?\", s, nil // trigraph workaround\n\tcase '\\'', '\"', '\\\\':\n\t\treturn 
string(r), s, nil\n\tcase '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':\n\t\tif len(s) < 2 {\n\t\t\treturn \"\", \"\", fmt.Errorf(`\\%c requires 2 following digits`, r)\n\t\t}\n\t\tbase := 8\n\t\tss := s[:2]\n\t\ts = s[2:]\n\t\tif r == 'x' || r == 'X' {\n\t\t\tbase = 16\n\t\t} else {\n\t\t\tss = string(r) + ss\n\t\t}\n\t\ti, err := strconv.ParseUint(ss, base, 8)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\treturn string([]byte{byte(i)}), s, nil\n\tcase 'u', 'U':\n\t\tn := 4\n\t\tif r == 'U' {\n\t\t\tn = 8\n\t\t}\n\t\tif len(s) < n {\n\t\t\treturn \"\", \"\", fmt.Errorf(`\\%c requires %d digits`, r, n)\n\t\t}\n\n\t\tbs := make([]byte, n/2)\n\t\tfor i := 0; i < n; i += 2 {\n\t\t\ta, ok1 := unhex(s[i])\n\t\t\tb, ok2 := unhex(s[i+1])\n\t\t\tif !ok1 || !ok2 {\n\t\t\t\treturn \"\", \"\", errBadHex\n\t\t\t}\n\t\t\tbs[i/2] = a<<4 | b\n\t\t}\n\t\ts = s[n:]\n\t\treturn string(bs), s, nil\n\t}\n\treturn \"\", \"\", fmt.Errorf(`unknown escape \\%c`, r)\n}\n\n// Adapted from src/pkg/strconv/quote.go.\nfunc unhex(b byte) (v byte, ok bool) {\n\tswitch {\n\tcase '0' <= b && b <= '9':\n\t\treturn b - '0', true\n\tcase 'a' <= b && b <= 'f':\n\t\treturn b - 'a' + 10, true\n\tcase 'A' <= b && b <= 'F':\n\t\treturn b - 'A' + 10, true\n\t}\n\treturn 0, false\n}\n\n// Back off the parser by one token. 
Can only be done between calls to next().\n// It makes the next advance() a no-op.\nfunc (p *textParser) back() { p.backed = true }\n\n// Advances the parser and returns the new current token.\nfunc (p *textParser) next() *token {\n\tif p.backed || p.done {\n\t\tp.backed = false\n\t\treturn &p.cur\n\t}\n\tp.advance()\n\tif p.done {\n\t\tp.cur.value = \"\"\n\t} else if len(p.cur.value) > 0 && p.cur.value[0] == '\"' {\n\t\t// Look for multiple quoted strings separated by whitespace,\n\t\t// and concatenate them.\n\t\tcat := p.cur\n\t\tfor {\n\t\t\tp.skipWhitespace()\n\t\t\tif p.done || p.s[0] != '\"' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tp.advance()\n\t\t\tif p.cur.err != nil {\n\t\t\t\treturn &p.cur\n\t\t\t}\n\t\t\tcat.value += \" \" + p.cur.value\n\t\t\tcat.unquoted += p.cur.unquoted\n\t\t}\n\t\tp.done = false // parser may have seen EOF, but we want to return cat\n\t\tp.cur = cat\n\t}\n\treturn &p.cur\n}\n\nfunc (p *textParser) consumeToken(s string) error {\n\ttok := p.next()\n\tif tok.err != nil {\n\t\treturn tok.err\n\t}\n\tif tok.value != s {\n\t\tp.back()\n\t\treturn p.errorf(\"expected %q, found %q\", s, tok.value)\n\t}\n\treturn nil\n}\n\n// Return a RequiredNotSetError indicating which required field was not set.\nfunc (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {\n\tst := sv.Type()\n\tsprops := GetProperties(st)\n\tfor i := 0; i < st.NumField(); i++ {\n\t\tif !isNil(sv.Field(i)) {\n\t\t\tcontinue\n\t\t}\n\n\t\tprops := sprops.Prop[i]\n\t\tif props.Required {\n\t\t\treturn &RequiredNotSetError{fmt.Sprintf(\"%v.%v\", st, props.OrigName)}\n\t\t}\n\t}\n\treturn &RequiredNotSetError{fmt.Sprintf(\"%v.<unknown field name>\", st)} // should not happen\n}\n\n// Returns the index in the struct for the named field, as well as the parsed tag properties.\nfunc structFieldByName(st reflect.Type, name string) (int, *Properties, bool) {\n\tsprops := GetProperties(st)\n\ti, ok := sprops.decoderOrigNames[name]\n\tif ok {\n\t\treturn i, 
sprops.Prop[i], true\n\t}\n\treturn -1, nil, false\n}\n\n// Consume a ':' from the input stream (if the next token is a colon),\n// returning an error if a colon is needed but not present.\nfunc (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {\n\ttok := p.next()\n\tif tok.err != nil {\n\t\treturn tok.err\n\t}\n\tif tok.value != \":\" {\n\t\t// Colon is optional when the field is a group or message.\n\t\tneedColon := true\n\t\tswitch props.Wire {\n\t\tcase \"group\":\n\t\t\tneedColon = false\n\t\tcase \"bytes\":\n\t\t\t// A \"bytes\" field is either a message, a string, or a repeated field;\n\t\t\t// those three become *T, *string and []T respectively, so we can check for\n\t\t\t// this field being a pointer to a non-string.\n\t\t\tif typ.Kind() == reflect.Ptr {\n\t\t\t\t// *T or *string\n\t\t\t\tif typ.Elem().Kind() == reflect.String {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else if typ.Kind() == reflect.Slice {\n\t\t\t\t// []T or []*T\n\t\t\t\tif typ.Elem().Kind() != reflect.Ptr {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else if typ.Kind() == reflect.String {\n\t\t\t\t// The proto3 exception is for a string field,\n\t\t\t\t// which requires a colon.\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tneedColon = false\n\t\t}\n\t\tif needColon {\n\t\t\treturn p.errorf(\"expected ':', found %q\", tok.value)\n\t\t}\n\t\tp.back()\n\t}\n\treturn nil\n}\n\nfunc (p *textParser) readStruct(sv reflect.Value, terminator string) error {\n\tst := sv.Type()\n\treqCount := GetProperties(st).reqCount\n\tvar reqFieldErr error\n\tfieldSet := make(map[string]bool)\n\t// A struct is a sequence of \"name: value\", terminated by one of\n\t// '>' or '}', or the end of the input.  
A name may also be\n\t// \"[extension]\".\n\tfor {\n\t\ttok := p.next()\n\t\tif tok.err != nil {\n\t\t\treturn tok.err\n\t\t}\n\t\tif tok.value == terminator {\n\t\t\tbreak\n\t\t}\n\t\tif tok.value == \"[\" {\n\t\t\t// Looks like an extension.\n\t\t\t//\n\t\t\t// TODO: Check whether we need to handle\n\t\t\t// namespace rooted names (e.g. \".something.Foo\").\n\t\t\ttok = p.next()\n\t\t\tif tok.err != nil {\n\t\t\t\treturn tok.err\n\t\t\t}\n\t\t\tvar desc *ExtensionDesc\n\t\t\t// This could be faster, but it's functional.\n\t\t\t// TODO: Do something smarter than a linear scan.\n\t\t\tfor _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {\n\t\t\t\tif d.Name == tok.value {\n\t\t\t\t\tdesc = d\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif desc == nil {\n\t\t\t\treturn p.errorf(\"unrecognized extension %q\", tok.value)\n\t\t\t}\n\t\t\t// Check the extension terminator.\n\t\t\ttok = p.next()\n\t\t\tif tok.err != nil {\n\t\t\t\treturn tok.err\n\t\t\t}\n\t\t\tif tok.value != \"]\" {\n\t\t\t\treturn p.errorf(\"unrecognized extension terminator %q\", tok.value)\n\t\t\t}\n\n\t\t\tprops := &Properties{}\n\t\t\tprops.Parse(desc.Tag)\n\n\t\t\ttyp := reflect.TypeOf(desc.ExtensionType)\n\t\t\tif err := p.checkForColon(props, typ); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\trep := desc.repeated()\n\n\t\t\t// Read the extension structure, and set it in\n\t\t\t// the value we're constructing.\n\t\t\tvar ext reflect.Value\n\t\t\tif !rep {\n\t\t\t\text = reflect.New(typ).Elem()\n\t\t\t} else {\n\t\t\t\text = reflect.New(typ.Elem()).Elem()\n\t\t\t}\n\t\t\tif err := p.readAny(ext, props); err != nil {\n\t\t\t\tif _, ok := err.(*RequiredNotSetError); !ok {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treqFieldErr = err\n\t\t\t}\n\t\t\tep := sv.Addr().Interface().(extendableProto)\n\t\t\tif !rep {\n\t\t\t\tSetExtension(ep, desc, ext.Interface())\n\t\t\t} else {\n\t\t\t\told, err := GetExtension(ep, desc)\n\t\t\t\tvar sl reflect.Value\n\t\t\t\tif err == nil 
{\n\t\t\t\t\tsl = reflect.ValueOf(old) // existing slice\n\t\t\t\t} else {\n\t\t\t\t\tsl = reflect.MakeSlice(typ, 0, 1)\n\t\t\t\t}\n\t\t\t\tsl = reflect.Append(sl, ext)\n\t\t\t\tSetExtension(ep, desc, sl.Interface())\n\t\t\t}\n\t\t} else {\n\t\t\t// This is a normal, non-extension field.\n\t\t\tname := tok.value\n\t\t\tfi, props, ok := structFieldByName(st, name)\n\t\t\tif !ok {\n\t\t\t\treturn p.errorf(\"unknown field name %q in %v\", name, st)\n\t\t\t}\n\n\t\t\tdst := sv.Field(fi)\n\n\t\t\tif dst.Kind() == reflect.Map {\n\t\t\t\t// Consume any colon.\n\t\t\t\tif err := p.checkForColon(props, dst.Type()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// Construct the map if it doesn't already exist.\n\t\t\t\tif dst.IsNil() {\n\t\t\t\t\tdst.Set(reflect.MakeMap(dst.Type()))\n\t\t\t\t}\n\t\t\t\tkey := reflect.New(dst.Type().Key()).Elem()\n\t\t\t\tval := reflect.New(dst.Type().Elem()).Elem()\n\n\t\t\t\t// The map entry should be this sequence of tokens:\n\t\t\t\t//\t< key : KEY value : VALUE >\n\t\t\t\t// Technically the \"key\" and \"value\" could come in any order,\n\t\t\t\t// but in practice they won't.\n\n\t\t\t\ttok := p.next()\n\t\t\t\tvar terminator string\n\t\t\t\tswitch tok.value {\n\t\t\t\tcase \"<\":\n\t\t\t\t\tterminator = \">\"\n\t\t\t\tcase \"{\":\n\t\t\t\t\tterminator = \"}\"\n\t\t\t\tdefault:\n\t\t\t\t\treturn p.errorf(\"expected '{' or '<', found %q\", tok.value)\n\t\t\t\t}\n\t\t\t\tif err := p.consumeToken(\"key\"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := p.consumeToken(\":\"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := p.readAny(key, props.mkeyprop); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := p.consumeOptionalSeparator(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := p.consumeToken(\"value\"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {\n\t\t\t\t\treturn 
err\n\t\t\t\t}\n\t\t\t\tif err := p.readAny(val, props.mvalprop); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := p.consumeOptionalSeparator(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := p.consumeToken(terminator); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tdst.SetMapIndex(key, val)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Check that it's not already set if it's not a repeated field.\n\t\t\tif !props.Repeated && fieldSet[name] {\n\t\t\t\treturn p.errorf(\"non-repeated field %q was repeated\", name)\n\t\t\t}\n\n\t\t\tif err := p.checkForColon(props, st.Field(fi).Type); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Parse into the field.\n\t\t\tfieldSet[name] = true\n\t\t\tif err := p.readAny(dst, props); err != nil {\n\t\t\t\tif _, ok := err.(*RequiredNotSetError); !ok {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treqFieldErr = err\n\t\t\t} else if props.Required {\n\t\t\t\treqCount--\n\t\t\t}\n\t\t}\n\n\t\tif err := p.consumeOptionalSeparator(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tif reqCount > 0 {\n\t\treturn p.missingRequiredFieldError(sv)\n\t}\n\treturn reqFieldErr\n}\n\n// consumeOptionalSeparator consumes an optional semicolon or comma.\n// It is used in readStruct to provide backward compatibility.\nfunc (p *textParser) consumeOptionalSeparator() error {\n\ttok := p.next()\n\tif tok.err != nil {\n\t\treturn tok.err\n\t}\n\tif tok.value != \";\" && tok.value != \",\" {\n\t\tp.back()\n\t}\n\treturn nil\n}\n\nfunc (p *textParser) readAny(v reflect.Value, props *Properties) error {\n\ttok := p.next()\n\tif tok.err != nil {\n\t\treturn tok.err\n\t}\n\tif tok.value == \"\" {\n\t\treturn p.errorf(\"unexpected EOF\")\n\t}\n\n\tswitch fv := v; fv.Kind() {\n\tcase reflect.Slice:\n\t\tat := v.Type()\n\t\tif at.Elem().Kind() == reflect.Uint8 {\n\t\t\t// Special case for []byte\n\t\t\tif tok.value[0] != '\"' && tok.value[0] != '\\'' {\n\t\t\t\t// Deliberately written out here, as the error 
after\n\t\t\t\t// this switch statement would write \"invalid []byte: ...\",\n\t\t\t\t// which is not as user-friendly.\n\t\t\t\treturn p.errorf(\"invalid string: %v\", tok.value)\n\t\t\t}\n\t\t\tbytes := []byte(tok.unquoted)\n\t\t\tfv.Set(reflect.ValueOf(bytes))\n\t\t\treturn nil\n\t\t}\n\t\t// Repeated field. May already exist.\n\t\tflen := fv.Len()\n\t\tif flen == fv.Cap() {\n\t\t\tnav := reflect.MakeSlice(at, flen, 2*flen+1)\n\t\t\treflect.Copy(nav, fv)\n\t\t\tfv.Set(nav)\n\t\t}\n\t\tfv.SetLen(flen + 1)\n\n\t\t// Read one.\n\t\tp.back()\n\t\treturn p.readAny(fv.Index(flen), props)\n\tcase reflect.Bool:\n\t\t// Either \"true\", \"false\", 1 or 0.\n\t\tswitch tok.value {\n\t\tcase \"true\", \"1\":\n\t\t\tfv.SetBool(true)\n\t\t\treturn nil\n\t\tcase \"false\", \"0\":\n\t\t\tfv.SetBool(false)\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Float32, reflect.Float64:\n\t\tv := tok.value\n\t\t// Ignore 'f' for compatibility with output generated by C++, but don't\n\t\t// remove 'f' when the value is \"-inf\" or \"inf\".\n\t\tif strings.HasSuffix(v, \"f\") && tok.value != \"-inf\" && tok.value != \"inf\" {\n\t\t\tv = v[:len(v)-1]\n\t\t}\n\t\tif f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {\n\t\t\tfv.SetFloat(f)\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Int32:\n\t\tif x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {\n\t\t\tfv.SetInt(x)\n\t\t\treturn nil\n\t\t}\n\n\t\tif len(props.Enum) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tm, ok := enumValueMaps[props.Enum]\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tx, ok := m[tok.value]\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tfv.SetInt(int64(x))\n\t\treturn nil\n\tcase reflect.Int64:\n\t\tif x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {\n\t\t\tfv.SetInt(x)\n\t\t\treturn nil\n\t\t}\n\n\tcase reflect.Ptr:\n\t\t// A basic field (indirected through pointer), or a repeated message/group\n\t\tp.back()\n\t\tfv.Set(reflect.New(fv.Type().Elem()))\n\t\treturn p.readAny(fv.Elem(), props)\n\tcase reflect.String:\n\t\tif 
tok.value[0] == '\"' || tok.value[0] == '\\'' {\n\t\t\tfv.SetString(tok.unquoted)\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Struct:\n\t\tvar terminator string\n\t\tswitch tok.value {\n\t\tcase \"{\":\n\t\t\tterminator = \"}\"\n\t\tcase \"<\":\n\t\t\tterminator = \">\"\n\t\tdefault:\n\t\t\treturn p.errorf(\"expected '{' or '<', found %q\", tok.value)\n\t\t}\n\t\t// TODO: Handle nested messages which implement encoding.TextUnmarshaler.\n\t\treturn p.readStruct(fv, terminator)\n\tcase reflect.Uint32:\n\t\tif x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {\n\t\t\tfv.SetUint(uint64(x))\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Uint64:\n\t\tif x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {\n\t\t\tfv.SetUint(x)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn p.errorf(\"invalid %v: %v\", v.Type(), tok.value)\n}\n\n// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb\n// before starting to unmarshal, so any existing data in pb is always removed.\n// If a required field is not set and no other error occurs,\n// UnmarshalText returns *RequiredNotSetError.\nfunc UnmarshalText(s string, pb Message) error {\n\tif um, ok := pb.(encoding.TextUnmarshaler); ok {\n\t\terr := um.UnmarshalText([]byte(s))\n\t\treturn err\n\t}\n\tpb.Reset()\n\tv := reflect.ValueOf(pb)\n\tif pe := newTextParser(s).readStruct(v.Elem(), \"\"); pe != nil {\n\t\treturn pe\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/gorilla/context/.travis.yml",
    "content": "language: go\n\ngo:\n  - 1.0\n  - 1.1\n  - 1.2\n  - 1.3\n  - 1.4\n  - tip\n"
  },
  {
    "path": "vendor/github.com/gorilla/context/LICENSE",
    "content": "Copyright (c) 2012 Rodrigo Moraes. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n\t * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\t * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n\t * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "vendor/github.com/gorilla/context/README.md",
    "content": "context\n=======\n[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)\n\ngorilla/context is a general purpose registry for global request variables.\n\nRead the full documentation here: http://www.gorillatoolkit.org/pkg/context\n"
  },
  {
    "path": "vendor/github.com/gorilla/context/context.go",
    "content": "// Copyright 2012 The Gorilla Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage context\n\nimport (\n\t\"net/http\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tmutex sync.RWMutex\n\tdata  = make(map[*http.Request]map[interface{}]interface{})\n\tdatat = make(map[*http.Request]int64)\n)\n\n// Set stores a value for a given key in a given request.\nfunc Set(r *http.Request, key, val interface{}) {\n\tmutex.Lock()\n\tif data[r] == nil {\n\t\tdata[r] = make(map[interface{}]interface{})\n\t\tdatat[r] = time.Now().Unix()\n\t}\n\tdata[r][key] = val\n\tmutex.Unlock()\n}\n\n// Get returns a value stored for a given key in a given request.\nfunc Get(r *http.Request, key interface{}) interface{} {\n\tmutex.RLock()\n\tif ctx := data[r]; ctx != nil {\n\t\tvalue := ctx[key]\n\t\tmutex.RUnlock()\n\t\treturn value\n\t}\n\tmutex.RUnlock()\n\treturn nil\n}\n\n// GetOk returns stored value and presence state like multi-value return of map access.\nfunc GetOk(r *http.Request, key interface{}) (interface{}, bool) {\n\tmutex.RLock()\n\tif _, ok := data[r]; ok {\n\t\tvalue, ok := data[r][key]\n\t\tmutex.RUnlock()\n\t\treturn value, ok\n\t}\n\tmutex.RUnlock()\n\treturn nil, false\n}\n\n// GetAll returns all stored values for the request as a map. 
Nil is returned for invalid requests.\nfunc GetAll(r *http.Request) map[interface{}]interface{} {\n\tmutex.RLock()\n\tif context, ok := data[r]; ok {\n\t\tresult := make(map[interface{}]interface{}, len(context))\n\t\tfor k, v := range context {\n\t\t\tresult[k] = v\n\t\t}\n\t\tmutex.RUnlock()\n\t\treturn result\n\t}\n\tmutex.RUnlock()\n\treturn nil\n}\n\n// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if\n// the request was registered.\nfunc GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {\n\tmutex.RLock()\n\tcontext, ok := data[r]\n\tresult := make(map[interface{}]interface{}, len(context))\n\tfor k, v := range context {\n\t\tresult[k] = v\n\t}\n\tmutex.RUnlock()\n\treturn result, ok\n}\n\n// Delete removes a value stored for a given key in a given request.\nfunc Delete(r *http.Request, key interface{}) {\n\tmutex.Lock()\n\tif data[r] != nil {\n\t\tdelete(data[r], key)\n\t}\n\tmutex.Unlock()\n}\n\n// Clear removes all values stored for a given request.\n//\n// This is usually called by a handler wrapper to clean up request\n// variables at the end of a request lifetime. See ClearHandler().\nfunc Clear(r *http.Request) {\n\tmutex.Lock()\n\tclear(r)\n\tmutex.Unlock()\n}\n\n// clear is Clear without the lock.\nfunc clear(r *http.Request) {\n\tdelete(data, r)\n\tdelete(datat, r)\n}\n\n// Purge removes request data stored for longer than maxAge, in seconds.\n// It returns the amount of requests removed.\n//\n// If maxAge <= 0, all request data is removed.\n//\n// This is only used for sanity check: in case context cleaning was not\n// properly set some request data can be kept forever, consuming an increasing\n// amount of memory. 
In case this is detected, Purge() must be called\n// periodically until the problem is fixed.\nfunc Purge(maxAge int) int {\n\tmutex.Lock()\n\tcount := 0\n\tif maxAge <= 0 {\n\t\tcount = len(data)\n\t\tdata = make(map[*http.Request]map[interface{}]interface{})\n\t\tdatat = make(map[*http.Request]int64)\n\t} else {\n\t\tmin := time.Now().Unix() - int64(maxAge)\n\t\tfor r := range data {\n\t\t\tif datat[r] < min {\n\t\t\t\tclear(r)\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\t}\n\tmutex.Unlock()\n\treturn count\n}\n\n// ClearHandler wraps an http.Handler and clears request values at the end\n// of a request lifetime.\nfunc ClearHandler(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer Clear(r)\n\t\th.ServeHTTP(w, r)\n\t})\n}\n"
  },
  {
    "path": "vendor/github.com/gorilla/context/doc.go",
    "content": "// Copyright 2012 The Gorilla Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n/*\nPackage context stores values shared during a request lifetime.\n\nFor example, a router can set variables extracted from the URL and later\napplication handlers can access those values, or it can be used to store\nsession values to be saved at the end of a request. There are several\nother common uses.\n\nThe idea was posted by Brad Fitzpatrick to the go-nuts mailing list:\n\n\thttp://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53\n\nHere's the basic usage: first define the keys that you will need. The key\ntype is interface{} so a key can be of any type that supports equality.\nHere we define a key using a custom int type to avoid name collisions:\n\n\tpackage foo\n\n\timport (\n\t\t\"github.com/gorilla/context\"\n\t)\n\n\ttype key int\n\n\tconst MyKey key = 0\n\nThen set a variable. Variables are bound to an http.Request object, so you\nneed a request instance to set a value:\n\n\tcontext.Set(r, MyKey, \"bar\")\n\nThe application can later access the variable using the same key you provided:\n\n\tfunc MyHandler(w http.ResponseWriter, r *http.Request) {\n\t\t// val is \"bar\".\n\t\tval := context.Get(r, foo.MyKey)\n\n\t\t// returns (\"bar\", true)\n\t\tval, ok := context.GetOk(r, foo.MyKey)\n\t\t// ...\n\t}\n\nAnd that's all about the basic usage. We discuss some other ideas below.\n\nAny type can be stored in the context. 
To enforce a given type, make the key\nprivate and wrap Get() and Set() to accept and return values of a specific\ntype:\n\n\ttype key int\n\n\tconst mykey key = 0\n\n\t// GetMyKey returns a value for this package from the request values.\n\tfunc GetMyKey(r *http.Request) SomeType {\n\t\tif rv := context.Get(r, mykey); rv != nil {\n\t\t\treturn rv.(SomeType)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// SetMyKey sets a value for this package in the request values.\n\tfunc SetMyKey(r *http.Request, val SomeType) {\n\t\tcontext.Set(r, mykey, val)\n\t}\n\nVariables must be cleared at the end of a request, to remove all values\nthat were stored. This can be done in an http.Handler, after a request was\nserved. Just call Clear() passing the request:\n\n\tcontext.Clear(r)\n\n...or use ClearHandler(), which conveniently wraps an http.Handler to clear\nvariables at the end of a request lifetime.\n\nThe Routers from the packages gorilla/mux and gorilla/pat call Clear()\nso if you are using either of them you don't need to clear the context manually.\n*/\npackage context\n"
  },
  {
    "path": "vendor/github.com/gorilla/mux/.travis.yml",
    "content": "language: go\n\ngo:\n  - 1.0\n  - 1.1\n  - 1.2\n  - tip\n"
  },
  {
    "path": "vendor/github.com/gorilla/mux/LICENSE",
    "content": "Copyright (c) 2012 Rodrigo Moraes. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n\t * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\t * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n\t * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "vendor/github.com/gorilla/mux/README.md",
    "content": "mux\n===\n[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux)\n\ngorilla/mux is a powerful URL router and dispatcher.\n\nRead the full documentation here: http://www.gorillatoolkit.org/pkg/mux\n"
  },
  {
    "path": "vendor/github.com/gorilla/mux/doc.go",
    "content": "// Copyright 2012 The Gorilla Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n/*\nPackage gorilla/mux implements a request router and dispatcher.\n\nThe name mux stands for \"HTTP request multiplexer\". Like the standard\nhttp.ServeMux, mux.Router matches incoming requests against a list of\nregistered routes and calls a handler for the route that matches the URL\nor other conditions. The main features are:\n\n\t* Requests can be matched based on URL host, path, path prefix, schemes,\n\t  header and query values, HTTP methods or using custom matchers.\n\t* URL hosts and paths can have variables with an optional regular\n\t  expression.\n\t* Registered URLs can be built, or \"reversed\", which helps maintaining\n\t  references to resources.\n\t* Routes can be used as subrouters: nested routes are only tested if the\n\t  parent route matches. This is useful to define groups of routes that\n\t  share common conditions like a host, a path prefix or other repeated\n\t  attributes. As a bonus, this optimizes request matching.\n\t* It implements the http.Handler interface so it is compatible with the\n\t  standard http.ServeMux.\n\nLet's start registering a couple of URL paths and handlers:\n\n\tfunc main() {\n\t\tr := mux.NewRouter()\n\t\tr.HandleFunc(\"/\", HomeHandler)\n\t\tr.HandleFunc(\"/products\", ProductsHandler)\n\t\tr.HandleFunc(\"/articles\", ArticlesHandler)\n\t\thttp.Handle(\"/\", r)\n\t}\n\nHere we register three routes mapping URL paths to handlers. This is\nequivalent to how http.HandleFunc() works: if an incoming request URL matches\none of the paths, the corresponding handler is called passing\n(http.ResponseWriter, *http.Request) as parameters.\n\nPaths can have variables. They are defined using the format {name} or\n{name:pattern}. If a regular expression pattern is not defined, the matched\nvariable will be anything until the next slash. 
For example:\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"/products/{key}\", ProductHandler)\n\tr.HandleFunc(\"/articles/{category}/\", ArticlesCategoryHandler)\n\tr.HandleFunc(\"/articles/{category}/{id:[0-9]+}\", ArticleHandler)\n\nThe names are used to create a map of route variables which can be retrieved\ncalling mux.Vars():\n\n\tvars := mux.Vars(request)\n\tcategory := vars[\"category\"]\n\nAnd this is all you need to know about the basic usage. More advanced options\nare explained below.\n\nRoutes can also be restricted to a domain or subdomain. Just define a host\npattern to be matched. They can also have variables:\n\n\tr := mux.NewRouter()\n\t// Only matches if domain is \"www.domain.com\".\n\tr.Host(\"www.domain.com\")\n\t// Matches a dynamic subdomain.\n\tr.Host(\"{subdomain:[a-z]+}.domain.com\")\n\nThere are several other matchers that can be added. To match path prefixes:\n\n\tr.PathPrefix(\"/products/\")\n\n...or HTTP methods:\n\n\tr.Methods(\"GET\", \"POST\")\n\n...or URL schemes:\n\n\tr.Schemes(\"https\")\n\n...or header values:\n\n\tr.Headers(\"X-Requested-With\", \"XMLHttpRequest\")\n\n...or query values:\n\n\tr.Queries(\"key\", \"value\")\n\n...or to use a custom matcher function:\n\n\tr.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {\n\t\treturn r.ProtoMajor == 0\n\t})\n\n...and finally, it is possible to combine several matchers in a single route:\n\n\tr.HandleFunc(\"/products\", ProductsHandler).\n\t  Host(\"www.domain.com\").\n\t  Methods(\"GET\").\n\t  Schemes(\"http\")\n\nSetting the same matching conditions again and again can be boring, so we have\na way to group several routes that share the same requirements.\nWe call it \"subrouting\".\n\nFor example, let's say we have several URLs that should only match when the\nhost is \"www.domain.com\". 
Create a route for that host and get a \"subrouter\"\nfrom it:\n\n\tr := mux.NewRouter()\n\ts := r.Host(\"www.domain.com\").Subrouter()\n\nThen register routes in the subrouter:\n\n\ts.HandleFunc(\"/products/\", ProductsHandler)\n\ts.HandleFunc(\"/products/{key}\", ProductHandler)\n\ts.HandleFunc(\"/articles/{category}/{id:[0-9]+}\", ArticleHandler)\n\nThe three URL paths we registered above will only be tested if the domain is\n\"www.domain.com\", because the subrouter is tested first. This is not\nonly convenient, but also optimizes request matching. You can create\nsubrouters combining any attribute matchers accepted by a route.\n\nSubrouters can be used to create domain or path \"namespaces\": you define\nsubrouters in a central place and then parts of the app can register its\npaths relatively to a given subrouter.\n\nThere's one more thing about subroutes. When a subrouter has a path prefix,\nthe inner routes use it as base for their paths:\n\n\tr := mux.NewRouter()\n\ts := r.PathPrefix(\"/products\").Subrouter()\n\t// \"/products/\"\n\ts.HandleFunc(\"/\", ProductsHandler)\n\t// \"/products/{key}/\"\n\ts.HandleFunc(\"/{key}/\", ProductHandler)\n\t// \"/products/{key}/details\"\n\ts.HandleFunc(\"/{key}/details\", ProductDetailsHandler)\n\nNow let's see how to build registered URLs.\n\nRoutes can be named. All routes that define a name can have their URLs built,\nor \"reversed\". We define a name calling Name() on a route. For example:\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"/articles/{category}/{id:[0-9]+}\", ArticleHandler).\n\t  Name(\"article\")\n\nTo build a URL, get the route and call the URL() method, passing a sequence of\nkey/value pairs for the route variables. 
For the previous route, we would do:\n\n\turl, err := r.Get(\"article\").URL(\"category\", \"technology\", \"id\", \"42\")\n\n...and the result will be a url.URL with the following path:\n\n\t\"/articles/technology/42\"\n\nThis also works for host variables:\n\n\tr := mux.NewRouter()\n\tr.Host(\"{subdomain}.domain.com\").\n\t  Path(\"/articles/{category}/{id:[0-9]+}\").\n\t  HandlerFunc(ArticleHandler).\n\t  Name(\"article\")\n\n\t// url.String() will be \"http://news.domain.com/articles/technology/42\"\n\turl, err := r.Get(\"article\").URL(\"subdomain\", \"news\",\n\t                                 \"category\", \"technology\",\n\t                                 \"id\", \"42\")\n\nAll variables defined in the route are required, and their values must\nconform to the corresponding patterns. These requirements guarantee that a\ngenerated URL will always match a registered route -- the only exception is\nfor explicitly defined \"build-only\" routes which never match.\n\nThere's also a way to build only the URL host or path for a route:\nuse the methods URLHost() or URLPath() instead. For the previous route,\nwe would do:\n\n\t// \"http://news.domain.com/\"\n\thost, err := r.Get(\"article\").URLHost(\"subdomain\", \"news\")\n\n\t// \"/articles/technology/42\"\n\tpath, err := r.Get(\"article\").URLPath(\"category\", \"technology\", \"id\", \"42\")\n\nAnd if you use subrouters, host and path defined separately can be built\nas well:\n\n\tr := mux.NewRouter()\n\ts := r.Host(\"{subdomain}.domain.com\").Subrouter()\n\ts.Path(\"/articles/{category}/{id:[0-9]+}\").\n\t  HandlerFunc(ArticleHandler).\n\t  Name(\"article\")\n\n\t// \"http://news.domain.com/articles/technology/42\"\n\turl, err := r.Get(\"article\").URL(\"subdomain\", \"news\",\n\t                                 \"category\", \"technology\",\n\t                                 \"id\", \"42\")\n*/\npackage mux\n"
  },
  {
    "path": "vendor/github.com/gorilla/mux/mux.go",
    "content": "// Copyright 2012 The Gorilla Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage mux\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"path\"\n\n\t\"github.com/gorilla/context\"\n)\n\n// NewRouter returns a new router instance.\nfunc NewRouter() *Router {\n\treturn &Router{namedRoutes: make(map[string]*Route), KeepContext: false}\n}\n\n// Router registers routes to be matched and dispatches a handler.\n//\n// It implements the http.Handler interface, so it can be registered to serve\n// requests:\n//\n//     var router = mux.NewRouter()\n//\n//     func main() {\n//         http.Handle(\"/\", router)\n//     }\n//\n// Or, for Google App Engine, register it in a init() function:\n//\n//     func init() {\n//         http.Handle(\"/\", router)\n//     }\n//\n// This will send all incoming requests to the router.\ntype Router struct {\n\t// Configurable Handler to be used when no route matches.\n\tNotFoundHandler http.Handler\n\t// Parent route, if this is a subrouter.\n\tparent parentRoute\n\t// Routes to be matched, in order.\n\troutes []*Route\n\t// Routes by name for URL building.\n\tnamedRoutes map[string]*Route\n\t// See Router.StrictSlash(). 
This defines the flag for new routes.\n\tstrictSlash bool\n\t// If true, do not clear the request context after handling the request\n\tKeepContext bool\n}\n\n// Match matches registered routes against the request.\nfunc (r *Router) Match(req *http.Request, match *RouteMatch) bool {\n\tfor _, route := range r.routes {\n\t\tif route.Match(req, match) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// ServeHTTP dispatches the handler registered in the matched route.\n//\n// When there is a match, the route variables can be retrieved calling\n// mux.Vars(request).\nfunc (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\t// Clean path to canonical form and redirect.\n\tif p := cleanPath(req.URL.Path); p != req.URL.Path {\n\n\t\t// Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query.\n\t\t// This matches with fix in go 1.2 r.c. 4 for same problem.  Go Issue:\n\t\t// http://code.google.com/p/go/issues/detail?id=5252\n\t\turl := *req.URL\n\t\turl.Path = p\n\t\tp = url.String()\n\n\t\tw.Header().Set(\"Location\", p)\n\t\tw.WriteHeader(http.StatusMovedPermanently)\n\t\treturn\n\t}\n\tvar match RouteMatch\n\tvar handler http.Handler\n\tif r.Match(req, &match) {\n\t\thandler = match.Handler\n\t\tsetVars(req, match.Vars)\n\t\tsetCurrentRoute(req, match.Route)\n\t}\n\tif handler == nil {\n\t\thandler = r.NotFoundHandler\n\t\tif handler == nil {\n\t\t\thandler = http.NotFoundHandler()\n\t\t}\n\t}\n\tif !r.KeepContext {\n\t\tdefer context.Clear(req)\n\t}\n\thandler.ServeHTTP(w, req)\n}\n\n// Get returns a route registered with the given name.\nfunc (r *Router) Get(name string) *Route {\n\treturn r.getNamedRoutes()[name]\n}\n\n// GetRoute returns a route registered with the given name. 
This method\n// was renamed to Get() and remains here for backwards compatibility.\nfunc (r *Router) GetRoute(name string) *Route {\n\treturn r.getNamedRoutes()[name]\n}\n\n// StrictSlash defines the trailing slash behavior for new routes. The initial\n// value is false.\n//\n// When true, if the route path is \"/path/\", accessing \"/path\" will redirect\n// to the former and vice versa. In other words, your application will always\n// see the path as specified in the route.\n//\n// When false, if the route path is \"/path\", accessing \"/path/\" will not match\n// this route and vice versa.\n//\n// Special case: when a route sets a path prefix using the PathPrefix() method,\n// strict slash is ignored for that route because the redirect behavior can't\n// be determined from a prefix alone. However, any subrouters created from that\n// route inherit the original StrictSlash setting.\nfunc (r *Router) StrictSlash(value bool) *Router {\n\tr.strictSlash = value\n\treturn r\n}\n\n// ----------------------------------------------------------------------------\n// parentRoute\n// ----------------------------------------------------------------------------\n\n// getNamedRoutes returns the map where named routes are registered.\nfunc (r *Router) getNamedRoutes() map[string]*Route {\n\tif r.namedRoutes == nil {\n\t\tif r.parent != nil {\n\t\t\tr.namedRoutes = r.parent.getNamedRoutes()\n\t\t} else {\n\t\t\tr.namedRoutes = make(map[string]*Route)\n\t\t}\n\t}\n\treturn r.namedRoutes\n}\n\n// getRegexpGroup returns regexp definitions from the parent route, if any.\nfunc (r *Router) getRegexpGroup() *routeRegexpGroup {\n\tif r.parent != nil {\n\t\treturn r.parent.getRegexpGroup()\n\t}\n\treturn nil\n}\n\nfunc (r *Router) buildVars(m map[string]string) map[string]string {\n\tif r.parent != nil {\n\t\tm = r.parent.buildVars(m)\n\t}\n\treturn m\n}\n\n// ----------------------------------------------------------------------------\n// Route factories\n// 
----------------------------------------------------------------------------\n\n// NewRoute registers an empty route.\nfunc (r *Router) NewRoute() *Route {\n\troute := &Route{parent: r, strictSlash: r.strictSlash}\n\tr.routes = append(r.routes, route)\n\treturn route\n}\n\n// Handle registers a new route with a matcher for the URL path.\n// See Route.Path() and Route.Handler().\nfunc (r *Router) Handle(path string, handler http.Handler) *Route {\n\treturn r.NewRoute().Path(path).Handler(handler)\n}\n\n// HandleFunc registers a new route with a matcher for the URL path.\n// See Route.Path() and Route.HandlerFunc().\nfunc (r *Router) HandleFunc(path string, f func(http.ResponseWriter,\n\t*http.Request)) *Route {\n\treturn r.NewRoute().Path(path).HandlerFunc(f)\n}\n\n// Headers registers a new route with a matcher for request header values.\n// See Route.Headers().\nfunc (r *Router) Headers(pairs ...string) *Route {\n\treturn r.NewRoute().Headers(pairs...)\n}\n\n// Host registers a new route with a matcher for the URL host.\n// See Route.Host().\nfunc (r *Router) Host(tpl string) *Route {\n\treturn r.NewRoute().Host(tpl)\n}\n\n// MatcherFunc registers a new route with a custom matcher function.\n// See Route.MatcherFunc().\nfunc (r *Router) MatcherFunc(f MatcherFunc) *Route {\n\treturn r.NewRoute().MatcherFunc(f)\n}\n\n// Methods registers a new route with a matcher for HTTP methods.\n// See Route.Methods().\nfunc (r *Router) Methods(methods ...string) *Route {\n\treturn r.NewRoute().Methods(methods...)\n}\n\n// Path registers a new route with a matcher for the URL path.\n// See Route.Path().\nfunc (r *Router) Path(tpl string) *Route {\n\treturn r.NewRoute().Path(tpl)\n}\n\n// PathPrefix registers a new route with a matcher for the URL path prefix.\n// See Route.PathPrefix().\nfunc (r *Router) PathPrefix(tpl string) *Route {\n\treturn r.NewRoute().PathPrefix(tpl)\n}\n\n// Queries registers a new route with a matcher for URL query values.\n// See Route.Queries().\nfunc 
(r *Router) Queries(pairs ...string) *Route {\n\treturn r.NewRoute().Queries(pairs...)\n}\n\n// Schemes registers a new route with a matcher for URL schemes.\n// See Route.Schemes().\nfunc (r *Router) Schemes(schemes ...string) *Route {\n\treturn r.NewRoute().Schemes(schemes...)\n}\n\n// BuildVars registers a new route with a custom function for modifying\n// route variables before building a URL.\nfunc (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {\n\treturn r.NewRoute().BuildVarsFunc(f)\n}\n\n// ----------------------------------------------------------------------------\n// Context\n// ----------------------------------------------------------------------------\n\n// RouteMatch stores information about a matched route.\ntype RouteMatch struct {\n\tRoute   *Route\n\tHandler http.Handler\n\tVars    map[string]string\n}\n\ntype contextKey int\n\nconst (\n\tvarsKey contextKey = iota\n\trouteKey\n)\n\n// Vars returns the route variables for the current request, if any.\nfunc Vars(r *http.Request) map[string]string {\n\tif rv := context.Get(r, varsKey); rv != nil {\n\t\treturn rv.(map[string]string)\n\t}\n\treturn nil\n}\n\n// CurrentRoute returns the matched route for the current request, if any.\nfunc CurrentRoute(r *http.Request) *Route {\n\tif rv := context.Get(r, routeKey); rv != nil {\n\t\treturn rv.(*Route)\n\t}\n\treturn nil\n}\n\nfunc setVars(r *http.Request, val interface{}) {\n\tcontext.Set(r, varsKey, val)\n}\n\nfunc setCurrentRoute(r *http.Request, val interface{}) {\n\tcontext.Set(r, routeKey, val)\n}\n\n// ----------------------------------------------------------------------------\n// Helpers\n// ----------------------------------------------------------------------------\n\n// cleanPath returns the canonical path for p, eliminating . and .. 
elements.\n// Borrowed from the net/http package.\nfunc cleanPath(p string) string {\n\tif p == \"\" {\n\t\treturn \"/\"\n\t}\n\tif p[0] != '/' {\n\t\tp = \"/\" + p\n\t}\n\tnp := path.Clean(p)\n\t// path.Clean removes trailing slash except for root;\n\t// put the trailing slash back if necessary.\n\tif p[len(p)-1] == '/' && np != \"/\" {\n\t\tnp += \"/\"\n\t}\n\treturn np\n}\n\n// uniqueVars returns an error if two slices contain duplicated strings.\nfunc uniqueVars(s1, s2 []string) error {\n\tfor _, v1 := range s1 {\n\t\tfor _, v2 := range s2 {\n\t\t\tif v1 == v2 {\n\t\t\t\treturn fmt.Errorf(\"mux: duplicated route variable %q\", v2)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// mapFromPairs converts variadic string parameters to a string map.\nfunc mapFromPairs(pairs ...string) (map[string]string, error) {\n\tlength := len(pairs)\n\tif length%2 != 0 {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"mux: number of parameters must be multiple of 2, got %v\", pairs)\n\t}\n\tm := make(map[string]string, length/2)\n\tfor i := 0; i < length; i += 2 {\n\t\tm[pairs[i]] = pairs[i+1]\n\t}\n\treturn m, nil\n}\n\n// matchInArray returns true if the given string value is in the array.\nfunc matchInArray(arr []string, value string) bool {\n\tfor _, v := range arr {\n\t\tif v == value {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// matchMap returns true if the given key/value pairs exist in a given map.\nfunc matchMap(toCheck map[string]string, toMatch map[string][]string,\n\tcanonicalKey bool) bool {\n\tfor k, v := range toCheck {\n\t\t// Check if key exists.\n\t\tif canonicalKey {\n\t\t\tk = http.CanonicalHeaderKey(k)\n\t\t}\n\t\tif values := toMatch[k]; values == nil {\n\t\t\treturn false\n\t\t} else if v != \"\" {\n\t\t\t// If value was defined as an empty string we only check that the\n\t\t\t// key exists. 
Otherwise we also check for equality.\n\t\t\tvalueExists := false\n\t\t\tfor _, value := range values {\n\t\t\t\tif v == value {\n\t\t\t\t\tvalueExists = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !valueExists {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "vendor/github.com/gorilla/mux/regexp.go",
    "content": "// Copyright 2012 The Gorilla Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage mux\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n// newRouteRegexp parses a route template and returns a routeRegexp,\n// used to match a host, a path or a query string.\n//\n// It will extract named variables, assemble a regexp to be matched, create\n// a \"reverse\" template to build URLs and compile regexps to validate variable\n// values used in URL building.\n//\n// Previously we accepted only Python-like identifiers for variable\n// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that\n// name and pattern can't be empty, and names can't contain a colon.\nfunc newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) {\n\t// Check if it is well-formed.\n\tidxs, errBraces := braceIndices(tpl)\n\tif errBraces != nil {\n\t\treturn nil, errBraces\n\t}\n\t// Backup the original.\n\ttemplate := tpl\n\t// Now let's parse it.\n\tdefaultPattern := \"[^/]+\"\n\tif matchQuery {\n\t\tdefaultPattern = \"[^?&]+\"\n\t\tmatchPrefix = true\n\t} else if matchHost {\n\t\tdefaultPattern = \"[^.]+\"\n\t\tmatchPrefix = false\n\t}\n\t// Only match strict slash if not matching\n\tif matchPrefix || matchHost || matchQuery {\n\t\tstrictSlash = false\n\t}\n\t// Set a flag for strictSlash.\n\tendSlash := false\n\tif strictSlash && strings.HasSuffix(tpl, \"/\") {\n\t\ttpl = tpl[:len(tpl)-1]\n\t\tendSlash = true\n\t}\n\tvarsN := make([]string, len(idxs)/2)\n\tvarsR := make([]*regexp.Regexp, len(idxs)/2)\n\tpattern := bytes.NewBufferString(\"\")\n\tif !matchQuery {\n\t\tpattern.WriteByte('^')\n\t}\n\treverse := bytes.NewBufferString(\"\")\n\tvar end int\n\tvar err error\n\tfor i := 0; i < len(idxs); i += 2 {\n\t\t// Set all values we are interested in.\n\t\traw := 
tpl[end:idxs[i]]\n\t\tend = idxs[i+1]\n\t\tparts := strings.SplitN(tpl[idxs[i]+1:end-1], \":\", 2)\n\t\tname := parts[0]\n\t\tpatt := defaultPattern\n\t\tif len(parts) == 2 {\n\t\t\tpatt = parts[1]\n\t\t}\n\t\t// Name or pattern can't be empty.\n\t\tif name == \"\" || patt == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"mux: missing name or pattern in %q\",\n\t\t\t\ttpl[idxs[i]:end])\n\t\t}\n\t\t// Build the regexp pattern.\n\t\tfmt.Fprintf(pattern, \"%s(%s)\", regexp.QuoteMeta(raw), patt)\n\t\t// Build the reverse template.\n\t\tfmt.Fprintf(reverse, \"%s%%s\", raw)\n\t\t// Append variable name and compiled pattern.\n\t\tvarsN[i/2] = name\n\t\tvarsR[i/2], err = regexp.Compile(fmt.Sprintf(\"^%s$\", patt))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\t// Add the remaining.\n\traw := tpl[end:]\n\tpattern.WriteString(regexp.QuoteMeta(raw))\n\tif strictSlash {\n\t\tpattern.WriteString(\"[/]?\")\n\t}\n\tif !matchPrefix {\n\t\tpattern.WriteByte('$')\n\t}\n\treverse.WriteString(raw)\n\tif endSlash {\n\t\treverse.WriteByte('/')\n\t}\n\t// Compile full regexp.\n\treg, errCompile := regexp.Compile(pattern.String())\n\tif errCompile != nil {\n\t\treturn nil, errCompile\n\t}\n\t// Done!\n\treturn &routeRegexp{\n\t\ttemplate:    template,\n\t\tmatchHost:   matchHost,\n\t\tmatchQuery:  matchQuery,\n\t\tstrictSlash: strictSlash,\n\t\tregexp:      reg,\n\t\treverse:     reverse.String(),\n\t\tvarsN:       varsN,\n\t\tvarsR:       varsR,\n\t}, nil\n}\n\n// routeRegexp stores a regexp to match a host or path and information to\n// collect and validate route variables.\ntype routeRegexp struct {\n\t// The unmodified template.\n\ttemplate string\n\t// True for host match, false for path or query string match.\n\tmatchHost bool\n\t// True for query string match, false for path and host match.\n\tmatchQuery bool\n\t// The strictSlash value defined on the route, but disabled if PathPrefix was used.\n\tstrictSlash bool\n\t// Expanded regexp.\n\tregexp *regexp.Regexp\n\t// Reverse 
template.\n\treverse string\n\t// Variable names.\n\tvarsN []string\n\t// Variable regexps (validators).\n\tvarsR []*regexp.Regexp\n}\n\n// Match matches the regexp against the URL host or path.\nfunc (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {\n\tif !r.matchHost {\n\t\tif r.matchQuery {\n\t\t\treturn r.regexp.MatchString(req.URL.RawQuery)\n\t\t} else {\n\t\t\treturn r.regexp.MatchString(req.URL.Path)\n\t\t}\n\t}\n\treturn r.regexp.MatchString(getHost(req))\n}\n\n// url builds a URL part using the given values.\nfunc (r *routeRegexp) url(values map[string]string) (string, error) {\n\turlValues := make([]interface{}, len(r.varsN))\n\tfor k, v := range r.varsN {\n\t\tvalue, ok := values[v]\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"mux: missing route variable %q\", v)\n\t\t}\n\t\turlValues[k] = value\n\t}\n\trv := fmt.Sprintf(r.reverse, urlValues...)\n\tif !r.regexp.MatchString(rv) {\n\t\t// The URL is checked against the full regexp, instead of checking\n\t\t// individual variables. 
This is faster but to provide a good error\n\t\t// message, we check individual regexps if the URL doesn't match.\n\t\tfor k, v := range r.varsN {\n\t\t\tif !r.varsR[k].MatchString(values[v]) {\n\t\t\t\treturn \"\", fmt.Errorf(\n\t\t\t\t\t\"mux: variable %q doesn't match, expected %q\", values[v],\n\t\t\t\t\tr.varsR[k].String())\n\t\t\t}\n\t\t}\n\t}\n\treturn rv, nil\n}\n\n// braceIndices returns the first level curly brace indices from a string.\n// It returns an error in case of unbalanced braces.\nfunc braceIndices(s string) ([]int, error) {\n\tvar level, idx int\n\tidxs := make([]int, 0)\n\tfor i := 0; i < len(s); i++ {\n\t\tswitch s[i] {\n\t\tcase '{':\n\t\t\tif level++; level == 1 {\n\t\t\t\tidx = i\n\t\t\t}\n\t\tcase '}':\n\t\t\tif level--; level == 0 {\n\t\t\t\tidxs = append(idxs, idx, i+1)\n\t\t\t} else if level < 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"mux: unbalanced braces in %q\", s)\n\t\t\t}\n\t\t}\n\t}\n\tif level != 0 {\n\t\treturn nil, fmt.Errorf(\"mux: unbalanced braces in %q\", s)\n\t}\n\treturn idxs, nil\n}\n\n// ----------------------------------------------------------------------------\n// routeRegexpGroup\n// ----------------------------------------------------------------------------\n\n// routeRegexpGroup groups the route matchers that carry variables.\ntype routeRegexpGroup struct {\n\thost    *routeRegexp\n\tpath    *routeRegexp\n\tqueries []*routeRegexp\n}\n\n// setMatch extracts the variables from the URL once a route matches.\nfunc (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {\n\t// Store host variables.\n\tif v.host != nil {\n\t\thostVars := v.host.regexp.FindStringSubmatch(getHost(req))\n\t\tif hostVars != nil {\n\t\t\tfor k, v := range v.host.varsN {\n\t\t\t\tm.Vars[v] = hostVars[k+1]\n\t\t\t}\n\t\t}\n\t}\n\t// Store path variables.\n\tif v.path != nil {\n\t\tpathVars := v.path.regexp.FindStringSubmatch(req.URL.Path)\n\t\tif pathVars != nil {\n\t\t\tfor k, v := range v.path.varsN {\n\t\t\t\tm.Vars[v] = 
pathVars[k+1]\n\t\t\t}\n\t\t\t// Check if we should redirect.\n\t\t\tif v.path.strictSlash {\n\t\t\t\tp1 := strings.HasSuffix(req.URL.Path, \"/\")\n\t\t\t\tp2 := strings.HasSuffix(v.path.template, \"/\")\n\t\t\t\tif p1 != p2 {\n\t\t\t\t\tu, _ := url.Parse(req.URL.String())\n\t\t\t\t\tif p1 {\n\t\t\t\t\t\tu.Path = u.Path[:len(u.Path)-1]\n\t\t\t\t\t} else {\n\t\t\t\t\t\tu.Path += \"/\"\n\t\t\t\t\t}\n\t\t\t\t\tm.Handler = http.RedirectHandler(u.String(), 301)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// Store query string variables.\n\trawQuery := req.URL.RawQuery\n\tfor _, q := range v.queries {\n\t\tqueryVars := q.regexp.FindStringSubmatch(rawQuery)\n\t\tif queryVars != nil {\n\t\t\tfor k, v := range q.varsN {\n\t\t\t\tm.Vars[v] = queryVars[k+1]\n\t\t\t}\n\t\t}\n\t}\n}\n\n// getHost tries its best to return the request host.\nfunc getHost(r *http.Request) string {\n\tif r.URL.IsAbs() {\n\t\treturn r.URL.Host\n\t}\n\thost := r.Host\n\t// Slice off any port information.\n\tif i := strings.Index(host, \":\"); i != -1 {\n\t\thost = host[:i]\n\t}\n\treturn host\n\n}\n"
  },
  {
    "path": "vendor/github.com/gorilla/mux/route.go",
    "content": "// Copyright 2012 The Gorilla Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage mux\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n)\n\n// Route stores information to match a request and build URLs.\ntype Route struct {\n\t// Parent where the route was registered (a Router).\n\tparent parentRoute\n\t// Request handler for the route.\n\thandler http.Handler\n\t// List of matchers.\n\tmatchers []matcher\n\t// Manager for the variables from host and path.\n\tregexp *routeRegexpGroup\n\t// If true, when the path pattern is \"/path/\", accessing \"/path\" will\n\t// redirect to the former and vice versa.\n\tstrictSlash bool\n\t// If true, this route never matches: it is only used to build URLs.\n\tbuildOnly bool\n\t// The name used to build URLs.\n\tname string\n\t// Error resulted from building a route.\n\terr error\n\n\tbuildVarsFunc BuildVarsFunc\n}\n\n// Match matches the route against the request.\nfunc (r *Route) Match(req *http.Request, match *RouteMatch) bool {\n\tif r.buildOnly || r.err != nil {\n\t\treturn false\n\t}\n\t// Match everything.\n\tfor _, m := range r.matchers {\n\t\tif matched := m.Match(req, match); !matched {\n\t\t\treturn false\n\t\t}\n\t}\n\t// Yay, we have a match. 
Let's collect some info about it.\n\tif match.Route == nil {\n\t\tmatch.Route = r\n\t}\n\tif match.Handler == nil {\n\t\tmatch.Handler = r.handler\n\t}\n\tif match.Vars == nil {\n\t\tmatch.Vars = make(map[string]string)\n\t}\n\t// Set variables.\n\tif r.regexp != nil {\n\t\tr.regexp.setMatch(req, match, r)\n\t}\n\treturn true\n}\n\n// ----------------------------------------------------------------------------\n// Route attributes\n// ----------------------------------------------------------------------------\n\n// GetError returns an error resulted from building the route, if any.\nfunc (r *Route) GetError() error {\n\treturn r.err\n}\n\n// BuildOnly sets the route to never match: it is only used to build URLs.\nfunc (r *Route) BuildOnly() *Route {\n\tr.buildOnly = true\n\treturn r\n}\n\n// Handler --------------------------------------------------------------------\n\n// Handler sets a handler for the route.\nfunc (r *Route) Handler(handler http.Handler) *Route {\n\tif r.err == nil {\n\t\tr.handler = handler\n\t}\n\treturn r\n}\n\n// HandlerFunc sets a handler function for the route.\nfunc (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {\n\treturn r.Handler(http.HandlerFunc(f))\n}\n\n// GetHandler returns the handler for the route, if any.\nfunc (r *Route) GetHandler() http.Handler {\n\treturn r.handler\n}\n\n// Name -----------------------------------------------------------------------\n\n// Name sets the name for the route, used to build URLs.\n// If the name was registered already it will be overwritten.\nfunc (r *Route) Name(name string) *Route {\n\tif r.name != \"\" {\n\t\tr.err = fmt.Errorf(\"mux: route already has name %q, can't set %q\",\n\t\t\tr.name, name)\n\t}\n\tif r.err == nil {\n\t\tr.name = name\n\t\tr.getNamedRoutes()[name] = r\n\t}\n\treturn r\n}\n\n// GetName returns the name for the route, if any.\nfunc (r *Route) GetName() string {\n\treturn r.name\n}\n\n// 
----------------------------------------------------------------------------\n// Matchers\n// ----------------------------------------------------------------------------\n\n// matcher types try to match a request.\ntype matcher interface {\n\tMatch(*http.Request, *RouteMatch) bool\n}\n\n// addMatcher adds a matcher to the route.\nfunc (r *Route) addMatcher(m matcher) *Route {\n\tif r.err == nil {\n\t\tr.matchers = append(r.matchers, m)\n\t}\n\treturn r\n}\n\n// addRegexpMatcher adds a host or path matcher and builder to a route.\nfunc (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {\n\tif r.err != nil {\n\t\treturn r.err\n\t}\n\tr.regexp = r.getRegexpGroup()\n\tif !matchHost && !matchQuery {\n\t\tif len(tpl) == 0 || tpl[0] != '/' {\n\t\t\treturn fmt.Errorf(\"mux: path must start with a slash, got %q\", tpl)\n\t\t}\n\t\tif r.regexp.path != nil {\n\t\t\ttpl = strings.TrimRight(r.regexp.path.template, \"/\") + tpl\n\t\t}\n\t}\n\trr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, q := range r.regexp.queries {\n\t\tif err = uniqueVars(rr.varsN, q.varsN); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif matchHost {\n\t\tif r.regexp.path != nil {\n\t\t\tif err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tr.regexp.host = rr\n\t} else {\n\t\tif r.regexp.host != nil {\n\t\t\tif err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif matchQuery {\n\t\t\tr.regexp.queries = append(r.regexp.queries, rr)\n\t\t} else {\n\t\t\tr.regexp.path = rr\n\t\t}\n\t}\n\tr.addMatcher(rr)\n\treturn nil\n}\n\n// Headers --------------------------------------------------------------------\n\n// headerMatcher matches the request against header values.\ntype headerMatcher map[string]string\n\nfunc (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {\n\treturn 
matchMap(m, r.Header, true)\n}\n\n// Headers adds a matcher for request header values.\n// It accepts a sequence of key/value pairs to be matched. For example:\n//\n//     r := mux.NewRouter()\n//     r.Headers(\"Content-Type\", \"application/json\",\n//               \"X-Requested-With\", \"XMLHttpRequest\")\n//\n// The above route will only match if both request header values match.\n//\n// It the value is an empty string, it will match any value if the key is set.\nfunc (r *Route) Headers(pairs ...string) *Route {\n\tif r.err == nil {\n\t\tvar headers map[string]string\n\t\theaders, r.err = mapFromPairs(pairs...)\n\t\treturn r.addMatcher(headerMatcher(headers))\n\t}\n\treturn r\n}\n\n// Host -----------------------------------------------------------------------\n\n// Host adds a matcher for the URL host.\n// It accepts a template with zero or more URL variables enclosed by {}.\n// Variables can define an optional regexp pattern to be matched:\n//\n// - {name} matches anything until the next dot.\n//\n// - {name:pattern} matches the given regexp pattern.\n//\n// For example:\n//\n//     r := mux.NewRouter()\n//     r.Host(\"www.domain.com\")\n//     r.Host(\"{subdomain}.domain.com\")\n//     r.Host(\"{subdomain:[a-z]+}.domain.com\")\n//\n// Variable names must be unique in a given route. 
They can be retrieved\n// calling mux.Vars(request).\nfunc (r *Route) Host(tpl string) *Route {\n\tr.err = r.addRegexpMatcher(tpl, true, false, false)\n\treturn r\n}\n\n// MatcherFunc ----------------------------------------------------------------\n\n// MatcherFunc is the function signature used by custom matchers.\ntype MatcherFunc func(*http.Request, *RouteMatch) bool\n\nfunc (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {\n\treturn m(r, match)\n}\n\n// MatcherFunc adds a custom function to be used as request matcher.\nfunc (r *Route) MatcherFunc(f MatcherFunc) *Route {\n\treturn r.addMatcher(f)\n}\n\n// Methods --------------------------------------------------------------------\n\n// methodMatcher matches the request against HTTP methods.\ntype methodMatcher []string\n\nfunc (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {\n\treturn matchInArray(m, r.Method)\n}\n\n// Methods adds a matcher for HTTP methods.\n// It accepts a sequence of one or more methods to be matched, e.g.:\n// \"GET\", \"POST\", \"PUT\".\nfunc (r *Route) Methods(methods ...string) *Route {\n\tfor k, v := range methods {\n\t\tmethods[k] = strings.ToUpper(v)\n\t}\n\treturn r.addMatcher(methodMatcher(methods))\n}\n\n// Path -----------------------------------------------------------------------\n\n// Path adds a matcher for the URL path.\n// It accepts a template with zero or more URL variables enclosed by {}. The\n// template must start with a \"/\".\n// Variables can define an optional regexp pattern to be matched:\n//\n// - {name} matches anything until the next slash.\n//\n// - {name:pattern} matches the given regexp pattern.\n//\n// For example:\n//\n//     r := mux.NewRouter()\n//     r.Path(\"/products/\").Handler(ProductsHandler)\n//     r.Path(\"/products/{key}\").Handler(ProductsHandler)\n//     r.Path(\"/articles/{category}/{id:[0-9]+}\").\n//       Handler(ArticleHandler)\n//\n// Variable names must be unique in a given route. 
They can be retrieved\n// calling mux.Vars(request).\nfunc (r *Route) Path(tpl string) *Route {\n\tr.err = r.addRegexpMatcher(tpl, false, false, false)\n\treturn r\n}\n\n// PathPrefix -----------------------------------------------------------------\n\n// PathPrefix adds a matcher for the URL path prefix. This matches if the given\n// template is a prefix of the full URL path. See Route.Path() for details on\n// the tpl argument.\n//\n// Note that it does not treat slashes specially (\"/foobar/\" will be matched by\n// the prefix \"/foo\") so you may want to use a trailing slash here.\n//\n// Also note that the setting of Router.StrictSlash() has no effect on routes\n// with a PathPrefix matcher.\nfunc (r *Route) PathPrefix(tpl string) *Route {\n\tr.err = r.addRegexpMatcher(tpl, false, true, false)\n\treturn r\n}\n\n// Query ----------------------------------------------------------------------\n\n// Queries adds a matcher for URL query values.\n// It accepts a sequence of key/value pairs. 
Values may define variables.\n// For example:\n//\n//     r := mux.NewRouter()\n//     r.Queries(\"foo\", \"bar\", \"id\", \"{id:[0-9]+}\")\n//\n// The above route will only match if the URL contains the defined queries\n// values, e.g.: ?foo=bar&id=42.\n//\n// It the value is an empty string, it will match any value if the key is set.\n//\n// Variables can define an optional regexp pattern to be matched:\n//\n// - {name} matches anything until the next slash.\n//\n// - {name:pattern} matches the given regexp pattern.\nfunc (r *Route) Queries(pairs ...string) *Route {\n\tlength := len(pairs)\n\tif length%2 != 0 {\n\t\tr.err = fmt.Errorf(\n\t\t\t\"mux: number of parameters must be multiple of 2, got %v\", pairs)\n\t\treturn nil\n\t}\n\tfor i := 0; i < length; i += 2 {\n\t\tif r.err = r.addRegexpMatcher(pairs[i]+\"=\"+pairs[i+1], false, true, true); r.err != nil {\n\t\t\treturn r\n\t\t}\n\t}\n\n\treturn r\n}\n\n// Schemes --------------------------------------------------------------------\n\n// schemeMatcher matches the request against URL schemes.\ntype schemeMatcher []string\n\nfunc (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {\n\treturn matchInArray(m, r.URL.Scheme)\n}\n\n// Schemes adds a matcher for URL schemes.\n// It accepts a sequence of schemes to be matched, e.g.: \"http\", \"https\".\nfunc (r *Route) Schemes(schemes ...string) *Route {\n\tfor k, v := range schemes {\n\t\tschemes[k] = strings.ToLower(v)\n\t}\n\treturn r.addMatcher(schemeMatcher(schemes))\n}\n\n// BuildVarsFunc --------------------------------------------------------------\n\n// BuildVarsFunc is the function signature used by custom build variable\n// functions (which can modify route variables before a route's URL is built).\ntype BuildVarsFunc func(map[string]string) map[string]string\n\n// BuildVarsFunc adds a custom function to be used to modify build variables\n// before a route's URL is built.\nfunc (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route 
{\n\tr.buildVarsFunc = f\n\treturn r\n}\n\n// Subrouter ------------------------------------------------------------------\n\n// Subrouter creates a subrouter for the route.\n//\n// It will test the inner routes only if the parent route matched. For example:\n//\n//     r := mux.NewRouter()\n//     s := r.Host(\"www.domain.com\").Subrouter()\n//     s.HandleFunc(\"/products/\", ProductsHandler)\n//     s.HandleFunc(\"/products/{key}\", ProductHandler)\n//     s.HandleFunc(\"/articles/{category}/{id:[0-9]+}\"), ArticleHandler)\n//\n// Here, the routes registered in the subrouter won't be tested if the host\n// doesn't match.\nfunc (r *Route) Subrouter() *Router {\n\trouter := &Router{parent: r, strictSlash: r.strictSlash}\n\tr.addMatcher(router)\n\treturn router\n}\n\n// ----------------------------------------------------------------------------\n// URL building\n// ----------------------------------------------------------------------------\n\n// URL builds a URL for the route.\n//\n// It accepts a sequence of key/value pairs for the route variables. 
For\n// example, given this route:\n//\n//     r := mux.NewRouter()\n//     r.HandleFunc(\"/articles/{category}/{id:[0-9]+}\", ArticleHandler).\n//       Name(\"article\")\n//\n// ...a URL for it can be built using:\n//\n//     url, err := r.Get(\"article\").URL(\"category\", \"technology\", \"id\", \"42\")\n//\n// ...which will return an url.URL with the following path:\n//\n//     \"/articles/technology/42\"\n//\n// This also works for host variables:\n//\n//     r := mux.NewRouter()\n//     r.Host(\"{subdomain}.domain.com\").\n//       HandleFunc(\"/articles/{category}/{id:[0-9]+}\", ArticleHandler).\n//       Name(\"article\")\n//\n//     // url.String() will be \"http://news.domain.com/articles/technology/42\"\n//     url, err := r.Get(\"article\").URL(\"subdomain\", \"news\",\n//                                      \"category\", \"technology\",\n//                                      \"id\", \"42\")\n//\n// All variables defined in the route are required, and their values must\n// conform to the corresponding patterns.\nfunc (r *Route) URL(pairs ...string) (*url.URL, error) {\n\tif r.err != nil {\n\t\treturn nil, r.err\n\t}\n\tif r.regexp == nil {\n\t\treturn nil, errors.New(\"mux: route doesn't have a host or path\")\n\t}\n\tvalues, err := r.prepareVars(pairs...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar scheme, host, path string\n\tif r.regexp.host != nil {\n\t\t// Set a default scheme.\n\t\tscheme = \"http\"\n\t\tif host, err = r.regexp.host.url(values); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif r.regexp.path != nil {\n\t\tif path, err = r.regexp.path.url(values); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn &url.URL{\n\t\tScheme: scheme,\n\t\tHost:   host,\n\t\tPath:   path,\n\t}, nil\n}\n\n// URLHost builds the host part of the URL for a route. 
See Route.URL().\n//\n// The route must have a host defined.\nfunc (r *Route) URLHost(pairs ...string) (*url.URL, error) {\n\tif r.err != nil {\n\t\treturn nil, r.err\n\t}\n\tif r.regexp == nil || r.regexp.host == nil {\n\t\treturn nil, errors.New(\"mux: route doesn't have a host\")\n\t}\n\tvalues, err := r.prepareVars(pairs...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thost, err := r.regexp.host.url(values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &url.URL{\n\t\tScheme: \"http\",\n\t\tHost:   host,\n\t}, nil\n}\n\n// URLPath builds the path part of the URL for a route. See Route.URL().\n//\n// The route must have a path defined.\nfunc (r *Route) URLPath(pairs ...string) (*url.URL, error) {\n\tif r.err != nil {\n\t\treturn nil, r.err\n\t}\n\tif r.regexp == nil || r.regexp.path == nil {\n\t\treturn nil, errors.New(\"mux: route doesn't have a path\")\n\t}\n\tvalues, err := r.prepareVars(pairs...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpath, err := r.regexp.path.url(values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &url.URL{\n\t\tPath: path,\n\t}, nil\n}\n\n// prepareVars converts the route variable pairs into a map. 
If the route has a\n// BuildVarsFunc, it is invoked.\nfunc (r *Route) prepareVars(pairs ...string) (map[string]string, error) {\n\tm, err := mapFromPairs(pairs...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn r.buildVars(m), nil\n}\n\nfunc (r *Route) buildVars(m map[string]string) map[string]string {\n\tif r.parent != nil {\n\t\tm = r.parent.buildVars(m)\n\t}\n\tif r.buildVarsFunc != nil {\n\t\tm = r.buildVarsFunc(m)\n\t}\n\treturn m\n}\n\n// ----------------------------------------------------------------------------\n// parentRoute\n// ----------------------------------------------------------------------------\n\n// parentRoute allows routes to know about parent host and path definitions.\ntype parentRoute interface {\n\tgetNamedRoutes() map[string]*Route\n\tgetRegexpGroup() *routeRegexpGroup\n\tbuildVars(map[string]string) map[string]string\n}\n\n// getNamedRoutes returns the map where named routes are registered.\nfunc (r *Route) getNamedRoutes() map[string]*Route {\n\tif r.parent == nil {\n\t\t// During tests router is not always set.\n\t\tr.parent = NewRouter()\n\t}\n\treturn r.parent.getNamedRoutes()\n}\n\n// getRegexpGroup returns regexp definitions from this route.\nfunc (r *Route) getRegexpGroup() *routeRegexpGroup {\n\tif r.regexp == nil {\n\t\tif r.parent == nil {\n\t\t\t// During tests router is not always set.\n\t\t\tr.parent = NewRouter()\n\t\t}\n\t\tregexp := r.parent.getRegexpGroup()\n\t\tif regexp == nil {\n\t\t\tr.regexp = new(routeRegexpGroup)\n\t\t} else {\n\t\t\t// Copy.\n\t\t\tr.regexp = &routeRegexpGroup{\n\t\t\t\thost:    regexp.host,\n\t\t\t\tpath:    regexp.path,\n\t\t\t\tqueries: regexp.queries,\n\t\t\t}\n\t\t}\n\t}\n\treturn r.regexp\n}\n"
  },
  {
    "path": "vendor/github.com/mitchellh/goamz/LICENSE",
    "content": "This software is licensed under the LGPLv3, included below.\n\nAs a special exception to the GNU Lesser General Public License version 3\n(\"LGPL3\"), the copyright holders of this Library give you permission to\nconvey to a third party a Combined Work that links statically or dynamically\nto this Library without providing any Minimal Corresponding Source or\nMinimal Application Code as set out in 4d or providing the installation\ninformation set out in section 4e, provided that you comply with the other\nprovisions of LGPL3 and provided that you meet, for the Application the\nterms and conditions of the license(s) which apply to the Application.\n\nExcept as stated in this special exception, the provisions of LGPL3 will\ncontinue to comply in full to this Library. If you modify this Library, you\nmay apply this exception to your version of this Library, but you are not\nobliged to do so. If you do not wish to do so, delete this exception\nstatement from your version. This exception does not (and cannot) modify any\nlicense terms which apply to the Application, with which you must still\ncomply.\n\n\n                   GNU LESSER GENERAL PUBLIC LICENSE\n                       Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n\n  This version of the GNU Lesser General Public License incorporates\nthe terms and conditions of version 3 of the GNU General Public\nLicense, supplemented by the additional permissions listed below.\n\n  0. 
Additional Definitions.\n\n  As used herein, \"this License\" refers to version 3 of the GNU Lesser\nGeneral Public License, and the \"GNU GPL\" refers to version 3 of the GNU\nGeneral Public License.\n\n  \"The Library\" refers to a covered work governed by this License,\nother than an Application or a Combined Work as defined below.\n\n  An \"Application\" is any work that makes use of an interface provided\nby the Library, but which is not otherwise based on the Library.\nDefining a subclass of a class defined by the Library is deemed a mode\nof using an interface provided by the Library.\n\n  A \"Combined Work\" is a work produced by combining or linking an\nApplication with the Library.  The particular version of the Library\nwith which the Combined Work was made is also called the \"Linked\nVersion\".\n\n  The \"Minimal Corresponding Source\" for a Combined Work means the\nCorresponding Source for the Combined Work, excluding any source code\nfor portions of the Combined Work that, considered in isolation, are\nbased on the Application, and not on the Linked Version.\n\n  The \"Corresponding Application Code\" for a Combined Work means the\nobject code and/or source code for the Application, including any data\nand utility programs needed for reproducing the Combined Work from the\nApplication, but excluding the System Libraries of the Combined Work.\n\n  1. Exception to Section 3 of the GNU GPL.\n\n  You may convey a covered work under sections 3 and 4 of this License\nwithout being bound by section 3 of the GNU GPL.\n\n  2. 
Conveying Modified Versions.\n\n  If you modify a copy of the Library, and, in your modifications, a\nfacility refers to a function or data to be supplied by an Application\nthat uses the facility (other than as an argument passed when the\nfacility is invoked), then you may convey a copy of the modified\nversion:\n\n   a) under this License, provided that you make a good faith effort to\n   ensure that, in the event an Application does not supply the\n   function or data, the facility still operates, and performs\n   whatever part of its purpose remains meaningful, or\n\n   b) under the GNU GPL, with none of the additional permissions of\n   this License applicable to that copy.\n\n  3. Object Code Incorporating Material from Library Header Files.\n\n  The object code form of an Application may incorporate material from\na header file that is part of the Library.  You may convey such object\ncode under terms of your choice, provided that, if the incorporated\nmaterial is not limited to numerical parameters, data structure\nlayouts and accessors, or small macros, inline functions and templates\n(ten or fewer lines in length), you do both of the following:\n\n   a) Give prominent notice with each copy of the object code that the\n   Library is used in it and that the Library and its use are\n   covered by this License.\n\n   b) Accompany the object code with a copy of the GNU GPL and this license\n   document.\n\n  4. 
Combined Works.\n\n  You may convey a Combined Work under terms of your choice that,\ntaken together, effectively do not restrict modification of the\nportions of the Library contained in the Combined Work and reverse\nengineering for debugging such modifications, if you also do each of\nthe following:\n\n   a) Give prominent notice with each copy of the Combined Work that\n   the Library is used in it and that the Library and its use are\n   covered by this License.\n\n   b) Accompany the Combined Work with a copy of the GNU GPL and this license\n   document.\n\n   c) For a Combined Work that displays copyright notices during\n   execution, include the copyright notice for the Library among\n   these notices, as well as a reference directing the user to the\n   copies of the GNU GPL and this license document.\n\n   d) Do one of the following:\n\n       0) Convey the Minimal Corresponding Source under the terms of this\n       License, and the Corresponding Application Code in a form\n       suitable for, and under terms that permit, the user to\n       recombine or relink the Application with a modified version of\n       the Linked Version to produce a modified Combined Work, in the\n       manner specified by section 6 of the GNU GPL for conveying\n       Corresponding Source.\n\n       1) Use a suitable shared library mechanism for linking with the\n       Library.  
A suitable mechanism is one that (a) uses at run time\n       a copy of the Library already present on the user's computer\n       system, and (b) will operate properly with a modified version\n       of the Library that is interface-compatible with the Linked\n       Version.\n\n   e) Provide Installation Information, but only if you would otherwise\n   be required to provide such information under section 6 of the\n   GNU GPL, and only to the extent that such information is\n   necessary to install and execute a modified version of the\n   Combined Work produced by recombining or relinking the\n   Application with a modified version of the Linked Version. (If\n   you use option 4d0, the Installation Information must accompany\n   the Minimal Corresponding Source and Corresponding Application\n   Code. If you use option 4d1, you must provide the Installation\n   Information in the manner specified by section 6 of the GNU GPL\n   for conveying Corresponding Source.)\n\n  5. Combined Libraries.\n\n  You may place library facilities that are a work based on the\nLibrary side by side in a single library together with other library\nfacilities that are not Applications and are not covered by this\nLicense, and convey such a combined library under terms of your\nchoice, if you do both of the following:\n\n   a) Accompany the combined library with a copy of the same work based\n   on the Library, uncombined with any other library facilities,\n   conveyed under the terms of this License.\n\n   b) Give prominent notice with the combined library that part of it\n   is a work based on the Library, and explaining where to find the\n   accompanying uncombined form of the same work.\n\n  6. Revised Versions of the GNU Lesser General Public License.\n\n  The Free Software Foundation may publish revised and/or new versions\nof the GNU Lesser General Public License from time to time. 
Such new\nversions will be similar in spirit to the present version, but may\ndiffer in detail to address new problems or concerns.\n\n  Each version is given a distinguishing version number. If the\nLibrary as you received it specifies that a certain numbered version\nof the GNU Lesser General Public License \"or any later version\"\napplies to it, you have the option of following the terms and\nconditions either of that published version or of any later version\npublished by the Free Software Foundation. If the Library as you\nreceived it does not specify a version number of the GNU Lesser\nGeneral Public License, you may choose any version of the GNU Lesser\nGeneral Public License ever published by the Free Software Foundation.\n\n  If the Library as you received it specifies that a proxy can decide\nwhether future versions of the GNU Lesser General Public License shall\napply, that proxy's public statement of acceptance of any version is\npermanent authorization for you to choose that version for the\nLibrary.\n"
  },
  {
    "path": "vendor/github.com/mitchellh/goamz/aws/attempt.go",
    "content": "package aws\n\nimport (\n\t\"time\"\n)\n\n// AttemptStrategy represents a strategy for waiting for an action\n// to complete successfully. This is an internal type used by the\n// implementation of other goamz packages.\ntype AttemptStrategy struct {\n\tTotal time.Duration // total duration of attempt.\n\tDelay time.Duration // interval between each try in the burst.\n\tMin   int           // minimum number of retries; overrides Total\n}\n\ntype Attempt struct {\n\tstrategy AttemptStrategy\n\tlast     time.Time\n\tend      time.Time\n\tforce    bool\n\tcount    int\n}\n\n// Start begins a new sequence of attempts for the given strategy.\nfunc (s AttemptStrategy) Start() *Attempt {\n\tnow := time.Now()\n\treturn &Attempt{\n\t\tstrategy: s,\n\t\tlast:     now,\n\t\tend:      now.Add(s.Total),\n\t\tforce:    true,\n\t}\n}\n\n// Next waits until it is time to perform the next attempt or returns\n// false if it is time to stop trying.\nfunc (a *Attempt) Next() bool {\n\tnow := time.Now()\n\tsleep := a.nextSleep(now)\n\tif !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {\n\t\treturn false\n\t}\n\ta.force = false\n\tif sleep > 0 && a.count > 0 {\n\t\ttime.Sleep(sleep)\n\t\tnow = time.Now()\n\t}\n\ta.count++\n\ta.last = now\n\treturn true\n}\n\nfunc (a *Attempt) nextSleep(now time.Time) time.Duration {\n\tsleep := a.strategy.Delay - now.Sub(a.last)\n\tif sleep < 0 {\n\t\treturn 0\n\t}\n\treturn sleep\n}\n\n// HasNext returns whether another attempt will be made if the current\n// one fails. If it returns true, the following call to Next is\n// guaranteed to return true.\nfunc (a *Attempt) HasNext() bool {\n\tif a.force || a.strategy.Min > a.count {\n\t\treturn true\n\t}\n\tnow := time.Now()\n\tif now.Add(a.nextSleep(now)).Before(a.end) {\n\t\ta.force = true\n\t\treturn true\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/github.com/mitchellh/goamz/aws/aws.go",
    "content": "//\n// goamz - Go packages to interact with the Amazon Web Services.\n//\n//   https://wiki.ubuntu.com/goamz\n//\n// Copyright (c) 2011 Canonical Ltd.\n//\n// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>\n//\npackage aws\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\n\t\"github.com/vaughan0/go-ini\"\n)\n\n// Region defines the URLs where AWS services may be accessed.\n//\n// See http://goo.gl/d8BP1 for more details.\ntype Region struct {\n\tName                 string // the canonical name of this region.\n\tEC2Endpoint          string\n\tS3Endpoint           string\n\tS3BucketEndpoint     string // Not needed by AWS S3. Use ${bucket} for bucket name.\n\tS3LocationConstraint bool   // true if this region requires a LocationConstraint declaration.\n\tS3LowercaseBucket    bool   // true if the region requires bucket names to be lower case.\n\tSDBEndpoint          string\n\tSNSEndpoint          string\n\tSQSEndpoint          string\n\tIAMEndpoint          string\n\tELBEndpoint          string\n\tAutoScalingEndpoint  string\n\tRdsEndpoint          string\n\tRoute53Endpoint      string\n}\n\nvar USGovWest = Region{\n\t\"us-gov-west-1\",\n\t\"https://ec2.us-gov-west-1.amazonaws.com\",\n\t\"https://s3-fips-us-gov-west-1.amazonaws.com\",\n\t\"\",\n\ttrue,\n\ttrue,\n\t\"\",\n\t\"https://sns.us-gov-west-1.amazonaws.com\",\n\t\"https://sqs.us-gov-west-1.amazonaws.com\",\n\t\"https://iam.us-gov.amazonaws.com\",\n\t\"https://elasticloadbalancing.us-gov-west-1.amazonaws.com\",\n\t\"https://autoscaling.us-gov-west-1.amazonaws.com\",\n\t\"https://rds.us-gov-west-1.amazonaws.com\",\n\t\"https://route53.amazonaws.com\",\n}\n\nvar USEast = 
Region{\n\t\"us-east-1\",\n\t\"https://ec2.us-east-1.amazonaws.com\",\n\t\"https://s3.amazonaws.com\",\n\t\"\",\n\tfalse,\n\tfalse,\n\t\"https://sdb.amazonaws.com\",\n\t\"https://sns.us-east-1.amazonaws.com\",\n\t\"https://sqs.us-east-1.amazonaws.com\",\n\t\"https://iam.amazonaws.com\",\n\t\"https://elasticloadbalancing.us-east-1.amazonaws.com\",\n\t\"https://autoscaling.us-east-1.amazonaws.com\",\n\t\"https://rds.us-east-1.amazonaws.com\",\n\t\"https://route53.amazonaws.com\",\n}\n\nvar USWest = Region{\n\t\"us-west-1\",\n\t\"https://ec2.us-west-1.amazonaws.com\",\n\t\"https://s3-us-west-1.amazonaws.com\",\n\t\"\",\n\ttrue,\n\ttrue,\n\t\"https://sdb.us-west-1.amazonaws.com\",\n\t\"https://sns.us-west-1.amazonaws.com\",\n\t\"https://sqs.us-west-1.amazonaws.com\",\n\t\"https://iam.amazonaws.com\",\n\t\"https://elasticloadbalancing.us-west-1.amazonaws.com\",\n\t\"https://autoscaling.us-west-1.amazonaws.com\",\n\t\"https://rds.us-west-1.amazonaws.com\",\n\t\"https://route53.amazonaws.com\",\n}\n\nvar USWest2 = Region{\n\t\"us-west-2\",\n\t\"https://ec2.us-west-2.amazonaws.com\",\n\t\"https://s3-us-west-2.amazonaws.com\",\n\t\"\",\n\ttrue,\n\ttrue,\n\t\"https://sdb.us-west-2.amazonaws.com\",\n\t\"https://sns.us-west-2.amazonaws.com\",\n\t\"https://sqs.us-west-2.amazonaws.com\",\n\t\"https://iam.amazonaws.com\",\n\t\"https://elasticloadbalancing.us-west-2.amazonaws.com\",\n\t\"https://autoscaling.us-west-2.amazonaws.com\",\n\t\"https://rds.us-west-2.amazonaws.com\",\n\t\"https://route53.amazonaws.com\",\n}\n\nvar EUWest = 
Region{\n\t\"eu-west-1\",\n\t\"https://ec2.eu-west-1.amazonaws.com\",\n\t\"https://s3-eu-west-1.amazonaws.com\",\n\t\"\",\n\ttrue,\n\ttrue,\n\t\"https://sdb.eu-west-1.amazonaws.com\",\n\t\"https://sns.eu-west-1.amazonaws.com\",\n\t\"https://sqs.eu-west-1.amazonaws.com\",\n\t\"https://iam.amazonaws.com\",\n\t\"https://elasticloadbalancing.eu-west-1.amazonaws.com\",\n\t\"https://autoscaling.eu-west-1.amazonaws.com\",\n\t\"https://rds.eu-west-1.amazonaws.com\",\n\t\"https://route53.amazonaws.com\",\n}\n\nvar EUCentral = Region{\n\t\"eu-central-1\",\n\t\"https://ec2.eu-central-1.amazonaws.com\",\n\t\"https://s3-eu-central-1.amazonaws.com\",\n\t\"\",\n\ttrue,\n\ttrue,\n\t\"\",\n\t\"https://sns.eu-central-1.amazonaws.com\",\n\t\"https://sqs.eu-central-1.amazonaws.com\",\n\t\"https://iam.amazonaws.com\",\n\t\"https://elasticloadbalancing.eu-central-1.amazonaws.com\",\n\t\"https://autoscaling.eu-central-1.amazonaws.com\",\n\t\"https://rds.eu-central-1.amazonaws.com\",\n\t\"https://route53.amazonaws.com\",\n}\n\nvar APSoutheast = Region{\n\t\"ap-southeast-1\",\n\t\"https://ec2.ap-southeast-1.amazonaws.com\",\n\t\"https://s3-ap-southeast-1.amazonaws.com\",\n\t\"\",\n\ttrue,\n\ttrue,\n\t\"https://sdb.ap-southeast-1.amazonaws.com\",\n\t\"https://sns.ap-southeast-1.amazonaws.com\",\n\t\"https://sqs.ap-southeast-1.amazonaws.com\",\n\t\"https://iam.amazonaws.com\",\n\t\"https://elasticloadbalancing.ap-southeast-1.amazonaws.com\",\n\t\"https://autoscaling.ap-southeast-1.amazonaws.com\",\n\t\"https://rds.ap-southeast-1.amazonaws.com\",\n\t\"https://route53.amazonaws.com\",\n}\n\nvar APSoutheast2 = 
Region{\n\t\"ap-southeast-2\",\n\t\"https://ec2.ap-southeast-2.amazonaws.com\",\n\t\"https://s3-ap-southeast-2.amazonaws.com\",\n\t\"\",\n\ttrue,\n\ttrue,\n\t\"https://sdb.ap-southeast-2.amazonaws.com\",\n\t\"https://sns.ap-southeast-2.amazonaws.com\",\n\t\"https://sqs.ap-southeast-2.amazonaws.com\",\n\t\"https://iam.amazonaws.com\",\n\t\"https://elasticloadbalancing.ap-southeast-2.amazonaws.com\",\n\t\"https://autoscaling.ap-southeast-2.amazonaws.com\",\n\t\"https://rds.ap-southeast-2.amazonaws.com\",\n\t\"https://route53.amazonaws.com\",\n}\n\nvar APNortheast = Region{\n\t\"ap-northeast-1\",\n\t\"https://ec2.ap-northeast-1.amazonaws.com\",\n\t\"https://s3-ap-northeast-1.amazonaws.com\",\n\t\"\",\n\ttrue,\n\ttrue,\n\t\"https://sdb.ap-northeast-1.amazonaws.com\",\n\t\"https://sns.ap-northeast-1.amazonaws.com\",\n\t\"https://sqs.ap-northeast-1.amazonaws.com\",\n\t\"https://iam.amazonaws.com\",\n\t\"https://elasticloadbalancing.ap-northeast-1.amazonaws.com\",\n\t\"https://autoscaling.ap-northeast-1.amazonaws.com\",\n\t\"https://rds.ap-northeast-1.amazonaws.com\",\n\t\"https://route53.amazonaws.com\",\n}\n\nvar SAEast = Region{\n\t\"sa-east-1\",\n\t\"https://ec2.sa-east-1.amazonaws.com\",\n\t\"https://s3-sa-east-1.amazonaws.com\",\n\t\"\",\n\ttrue,\n\ttrue,\n\t\"https://sdb.sa-east-1.amazonaws.com\",\n\t\"https://sns.sa-east-1.amazonaws.com\",\n\t\"https://sqs.sa-east-1.amazonaws.com\",\n\t\"https://iam.amazonaws.com\",\n\t\"https://elasticloadbalancing.sa-east-1.amazonaws.com\",\n\t\"https://autoscaling.sa-east-1.amazonaws.com\",\n\t\"https://rds.sa-east-1.amazonaws.com\",\n\t\"https://route53.amazonaws.com\",\n}\n\nvar CNNorth = 
Region{\n\t\"cn-north-1\",\n\t\"https://ec2.cn-north-1.amazonaws.com.cn\",\n\t\"https://s3.cn-north-1.amazonaws.com.cn\",\n\t\"\",\n\ttrue,\n\ttrue,\n\t\"\",\n\t\"https://sns.cn-north-1.amazonaws.com.cn\",\n\t\"https://sqs.cn-north-1.amazonaws.com.cn\",\n\t\"https://iam.cn-north-1.amazonaws.com.cn\",\n\t\"https://elasticloadbalancing.cn-north-1.amazonaws.com.cn\",\n\t\"https://autoscaling.cn-north-1.amazonaws.com.cn\",\n\t\"https://rds.cn-north-1.amazonaws.com.cn\",\n\t\"https://route53.amazonaws.com\",\n}\n\nvar Regions = map[string]Region{\n\tAPNortheast.Name:  APNortheast,\n\tAPSoutheast.Name:  APSoutheast,\n\tAPSoutheast2.Name: APSoutheast2,\n\tEUWest.Name:       EUWest,\n\tEUCentral.Name:    EUCentral,\n\tUSEast.Name:       USEast,\n\tUSWest.Name:       USWest,\n\tUSWest2.Name:      USWest2,\n\tSAEast.Name:       SAEast,\n\tUSGovWest.Name:    USGovWest,\n\tCNNorth.Name:      CNNorth,\n}\n\ntype Auth struct {\n\tAccessKey, SecretKey, Token string\n}\n\nvar unreserved = make([]bool, 128)\nvar hex = \"0123456789ABCDEF\"\n\nfunc init() {\n\t// RFC3986\n\tu := \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~\"\n\tfor _, c := range u {\n\t\tunreserved[c] = true\n\t}\n}\n\ntype credentials struct {\n\tCode            string\n\tLastUpdated     string\n\tType            string\n\tAccessKeyId     string\n\tSecretAccessKey string\n\tToken           string\n\tExpiration      string\n}\n\n// GetMetaData retrieves instance metadata about the current machine.\n//\n// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details.\nfunc GetMetaData(path string) (contents []byte, err error) {\n\turl := \"http://169.254.169.254/latest/meta-data/\" + path\n\n\tresp, err := RetryingClient.Get(url)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\terr = fmt.Errorf(\"Code %d returned for url %s\", resp.StatusCode, url)\n\t\treturn\n\t}\n\n\tbody, err := 
ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn []byte(body), err\n}\n\nfunc getInstanceCredentials() (cred credentials, err error) {\n\tcredentialPath := \"iam/security-credentials/\"\n\n\t// Get the instance role\n\trole, err := GetMetaData(credentialPath)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Get the instance role credentials\n\tcredentialJSON, err := GetMetaData(credentialPath + string(role))\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = json.Unmarshal([]byte(credentialJSON), &cred)\n\treturn\n}\n\n// GetAuth creates an Auth based on either passed in credentials,\n// environment information or instance based role credentials.\nfunc GetAuth(accessKey string, secretKey string) (auth Auth, err error) {\n\t// First try passed in credentials\n\tif accessKey != \"\" && secretKey != \"\" {\n\t\treturn Auth{accessKey, secretKey, \"\"}, nil\n\t}\n\n\t// Next try to get auth from the environment\n\tauth, err = SharedAuth()\n\tif err == nil {\n\t\t// Found auth, return\n\t\treturn\n\t}\n\n\t// Next try to get auth from the environment\n\tauth, err = EnvAuth()\n\tif err == nil {\n\t\t// Found auth, return\n\t\treturn\n\t}\n\n\t// Next try getting auth from the instance role\n\tcred, err := getInstanceCredentials()\n\tif err == nil {\n\t\t// Found auth, return\n\t\tauth.AccessKey = cred.AccessKeyId\n\t\tauth.SecretKey = cred.SecretAccessKey\n\t\tauth.Token = cred.Token\n\t\treturn\n\t}\n\terr = errors.New(\"No valid AWS authentication found\")\n\treturn\n}\n\n// SharedAuth creates an Auth based on shared credentials stored in\n// $HOME/.aws/credentials. 
The AWS_PROFILE environment variables is used to\n// select the profile.\nfunc SharedAuth() (auth Auth, err error) {\n\tvar profileName = os.Getenv(\"AWS_PROFILE\")\n\n\tif profileName == \"\" {\n\t\tprofileName = \"default\"\n\t}\n\n\tvar credentialsFile = os.Getenv(\"AWS_CREDENTIAL_FILE\")\n\tif credentialsFile == \"\" {\n\t\tvar homeDir = os.Getenv(\"HOME\")\n\t\tif homeDir == \"\" {\n\t\t\terr = errors.New(\"Could not get HOME\")\n\t\t\treturn\n\t\t}\n\t\tcredentialsFile = homeDir + \"/.aws/credentials\"\n\t}\n\n\tfile, err := ini.LoadFile(credentialsFile)\n\tif err != nil {\n\t\terr = errors.New(\"Couldn't parse AWS credentials file\")\n\t\treturn\n\t}\n\n\tvar profile = file[profileName]\n\tif profile == nil {\n\t\terr = errors.New(\"Couldn't find profile in AWS credentials file\")\n\t\treturn\n\t}\n\n\tauth.AccessKey = profile[\"aws_access_key_id\"]\n\tauth.SecretKey = profile[\"aws_secret_access_key\"]\n\n\tif auth.AccessKey == \"\" {\n\t\terr = errors.New(\"AWS_ACCESS_KEY_ID not found in environment in credentials file\")\n\t}\n\tif auth.SecretKey == \"\" {\n\t\terr = errors.New(\"AWS_SECRET_ACCESS_KEY not found in credentials file\")\n\t}\n\treturn\n}\n\n// EnvAuth creates an Auth based on environment information.\n// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment\n// For accounts that require a security token, it is read from AWS_SECURITY_TOKEN\n// variables are used.\nfunc EnvAuth() (auth Auth, err error) {\n\tauth.AccessKey = os.Getenv(\"AWS_ACCESS_KEY_ID\")\n\tif auth.AccessKey == \"\" {\n\t\tauth.AccessKey = os.Getenv(\"AWS_ACCESS_KEY\")\n\t}\n\n\tauth.SecretKey = os.Getenv(\"AWS_SECRET_ACCESS_KEY\")\n\tif auth.SecretKey == \"\" {\n\t\tauth.SecretKey = os.Getenv(\"AWS_SECRET_KEY\")\n\t}\n\n\tauth.Token = os.Getenv(\"AWS_SECURITY_TOKEN\")\n\n\tif auth.AccessKey == \"\" {\n\t\terr = errors.New(\"AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment\")\n\t}\n\tif auth.SecretKey == \"\" {\n\t\terr = errors.New(\"AWS_SECRET_ACCESS_KEY 
or AWS_SECRET_KEY not found in environment\")\n\t}\n\treturn\n}\n\n// Encode takes a string and URI-encodes it in a way suitable\n// to be used in AWS signatures.\nfunc Encode(s string) string {\n\tencode := false\n\tfor i := 0; i != len(s); i++ {\n\t\tc := s[i]\n\t\tif c > 127 || !unreserved[c] {\n\t\t\tencode = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !encode {\n\t\treturn s\n\t}\n\te := make([]byte, len(s)*3)\n\tei := 0\n\tfor i := 0; i != len(s); i++ {\n\t\tc := s[i]\n\t\tif c > 127 || !unreserved[c] {\n\t\t\te[ei] = '%'\n\t\t\te[ei+1] = hex[c>>4]\n\t\t\te[ei+2] = hex[c&0xF]\n\t\t\tei += 3\n\t\t} else {\n\t\t\te[ei] = c\n\t\t\tei += 1\n\t\t}\n\t}\n\treturn string(e[:ei])\n}\n"
  },
  {
    "path": "vendor/github.com/mitchellh/goamz/aws/client.go",
    "content": "package aws\n\nimport (\n\t\"math\"\n\t\"net\"\n\t\"net/http\"\n\t\"time\"\n)\n\ntype RetryableFunc func(*http.Request, *http.Response, error) bool\ntype WaitFunc func(try int)\ntype DeadlineFunc func() time.Time\n\ntype ResilientTransport struct {\n\t// Timeout is the maximum amount of time a dial will wait for\n\t// a connect to complete.\n\t//\n\t// The default is no timeout.\n\t//\n\t// With or without a timeout, the operating system may impose\n\t// its own earlier timeout. For instance, TCP timeouts are\n\t// often around 3 minutes.\n\tDialTimeout time.Duration\n\n\t// MaxTries, if non-zero, specifies the number of times we will retry on\n\t// failure. Retries are only attempted for temporary network errors or known\n\t// safe failures.\n\tMaxTries    int\n\tDeadline    DeadlineFunc\n\tShouldRetry RetryableFunc\n\tWait        WaitFunc\n\ttransport   *http.Transport\n}\n\n// Convenience method for creating an http client\nfunc NewClient(rt *ResilientTransport) *http.Client {\n\trt.transport = &http.Transport{\n\t\tDial: func(netw, addr string) (net.Conn, error) {\n\t\t\tc, err := net.DialTimeout(netw, addr, rt.DialTimeout)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.SetDeadline(rt.Deadline())\n\t\t\treturn c, nil\n\t\t},\n\t\tDisableKeepAlives: true,\n\t\tProxy:             http.ProxyFromEnvironment,\n\t}\n\t// TODO: Would be nice is ResilientTransport allowed clients to initialize\n\t// with http.Transport attributes.\n\treturn &http.Client{\n\t\tTransport: rt,\n\t}\n}\n\nvar retryingTransport = &ResilientTransport{\n\tDeadline: func() time.Time {\n\t\treturn time.Now().Add(5 * time.Second)\n\t},\n\tDialTimeout: 10 * time.Second,\n\tMaxTries:    3,\n\tShouldRetry: awsRetry,\n\tWait:        ExpBackoff,\n}\n\n// Exported default client\nvar RetryingClient = NewClient(retryingTransport)\n\nfunc (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) {\n\treturn t.tries(req)\n}\n\n// Retry a request a 
maximum of t.MaxTries times.\n// We'll only retry if the proper criteria are met.\n// If a wait function is specified, wait that amount of time\n// In between requests.\nfunc (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) {\n\tfor try := 0; try < t.MaxTries; try += 1 {\n\t\tres, err = t.transport.RoundTrip(req)\n\n\t\tif !t.ShouldRetry(req, res, err) {\n\t\t\tbreak\n\t\t}\n\t\tif res != nil {\n\t\t\tres.Body.Close()\n\t\t}\n\t\tif t.Wait != nil {\n\t\t\tt.Wait(try)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc ExpBackoff(try int) {\n\ttime.Sleep(100 * time.Millisecond *\n\t\ttime.Duration(math.Exp2(float64(try))))\n}\n\nfunc LinearBackoff(try int) {\n\ttime.Sleep(time.Duration(try*100) * time.Millisecond)\n}\n\n// Decide if we should retry a request.\n// In general, the criteria for retrying a request is described here\n// http://docs.aws.amazon.com/general/latest/gr/api-retries.html\nfunc awsRetry(req *http.Request, res *http.Response, err error) bool {\n\tretry := false\n\n\t// Retry if there's a temporary network error.\n\tif neterr, ok := err.(net.Error); ok {\n\t\tif neterr.Temporary() {\n\t\t\tretry = true\n\t\t}\n\t}\n\n\t// Retry if we get a 5xx series error.\n\tif res != nil {\n\t\tif res.StatusCode >= 500 && res.StatusCode < 600 {\n\t\t\tretry = true\n\t\t}\n\t}\n\n\treturn retry\n}\n"
  },
  {
    "path": "vendor/github.com/mitchellh/goamz/s3/multi.go",
    "content": "package s3\n\nimport (\n\t\"bytes\"\n\t\"crypto/md5\"\n\t\"encoding/base64\"\n\t\"encoding/hex\"\n\t\"encoding/xml\"\n\t\"errors\"\n\t\"io\"\n\t\"sort\"\n\t\"strconv\"\n)\n\n// Multi represents an unfinished multipart upload.\n//\n// Multipart uploads allow sending big objects in smaller chunks.\n// After all parts have been sent, the upload must be explicitly\n// completed by calling Complete with the list of parts.\n//\n// See http://goo.gl/vJfTG for an overview of multipart uploads.\ntype Multi struct {\n\tBucket   *Bucket\n\tKey      string\n\tUploadId string\n}\n\n// That's the default. Here just for testing.\nvar listMultiMax = 1000\n\ntype listMultiResp struct {\n\tNextKeyMarker      string\n\tNextUploadIdMarker string\n\tIsTruncated        bool\n\tUpload             []Multi\n\tCommonPrefixes     []string `xml:\"CommonPrefixes>Prefix\"`\n}\n\n// ListMulti returns the list of unfinished multipart uploads in b.\n//\n// The prefix parameter limits the response to keys that begin with the\n// specified prefix. You can use prefixes to separate a bucket into different\n// groupings of keys (to get the feeling of folders, for example).\n//\n// The delim parameter causes the response to group all of the keys that\n// share a common prefix up to the next delimiter in a single entry within\n// the CommonPrefixes field. 
You can use delimiters to separate a bucket\n// into different groupings of keys, similar to how folders would work.\n//\n// See http://goo.gl/ePioY for details.\nfunc (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) {\n\tparams := map[string][]string{\n\t\t\"uploads\":     {\"\"},\n\t\t\"max-uploads\": {strconv.FormatInt(int64(listMultiMax), 10)},\n\t\t\"prefix\":      {prefix},\n\t\t\"delimiter\":   {delim},\n\t}\n\tfor attempt := attempts.Start(); attempt.Next(); {\n\t\treq := &request{\n\t\t\tmethod: \"GET\",\n\t\t\tbucket: b.Name,\n\t\t\tparams: params,\n\t\t}\n\t\tvar resp listMultiResp\n\t\terr := b.S3.query(req, &resp)\n\t\tif shouldRetry(err) && attempt.HasNext() {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tfor i := range resp.Upload {\n\t\t\tmulti := &resp.Upload[i]\n\t\t\tmulti.Bucket = b\n\t\t\tmultis = append(multis, multi)\n\t\t}\n\t\tprefixes = append(prefixes, resp.CommonPrefixes...)\n\t\tif !resp.IsTruncated {\n\t\t\treturn multis, prefixes, nil\n\t\t}\n\t\tparams[\"key-marker\"] = []string{resp.NextKeyMarker}\n\t\tparams[\"upload-id-marker\"] = []string{resp.NextUploadIdMarker}\n\t\tattempt = attempts.Start() // Last request worked.\n\t}\n\tpanic(\"unreachable\")\n}\n\n// Multi returns a multipart upload handler for the provided key\n// inside b. 
If a multipart upload exists for key, it is returned,\n// otherwise a new multipart upload is initiated with contType and perm.\nfunc (b *Bucket) Multi(key, contType string, perm ACL) (*Multi, error) {\n\tmultis, _, err := b.ListMulti(key, \"\")\n\tif err != nil && !hasCode(err, \"NoSuchUpload\") {\n\t\treturn nil, err\n\t}\n\tfor _, m := range multis {\n\t\tif m.Key == key {\n\t\t\treturn m, nil\n\t\t}\n\t}\n\treturn b.InitMulti(key, contType, perm)\n}\n\n// InitMulti initializes a new multipart upload at the provided\n// key inside b and returns a value for manipulating it.\n//\n// See http://goo.gl/XP8kL for details.\nfunc (b *Bucket) InitMulti(key string, contType string, perm ACL) (*Multi, error) {\n\theaders := map[string][]string{\n\t\t\"Content-Type\":   {contType},\n\t\t\"Content-Length\": {\"0\"},\n\t\t\"x-amz-acl\":      {string(perm)},\n\t}\n\tparams := map[string][]string{\n\t\t\"uploads\": {\"\"},\n\t}\n\treq := &request{\n\t\tmethod:  \"POST\",\n\t\tbucket:  b.Name,\n\t\tpath:    key,\n\t\theaders: headers,\n\t\tparams:  params,\n\t}\n\tvar err error\n\tvar resp struct {\n\t\tUploadId string `xml:\"UploadId\"`\n\t}\n\tfor attempt := attempts.Start(); attempt.Next(); {\n\t\terr = b.S3.query(req, &resp)\n\t\tif !shouldRetry(err) {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil\n}\n\n// PutPart sends part n of the multipart upload, reading all the content from r.\n// Each part, except for the last one, must be at least 5MB in size.\n//\n// See http://goo.gl/pqZer for details.\nfunc (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) {\n\tpartSize, _, md5b64, err := seekerInfo(r)\n\tif err != nil {\n\t\treturn Part{}, err\n\t}\n\treturn m.putPart(n, r, partSize, md5b64)\n}\n\nfunc (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) {\n\theaders := map[string][]string{\n\t\t\"Content-Length\": {strconv.FormatInt(partSize, 
10)},\n\t\t\"Content-MD5\":    {md5b64},\n\t}\n\tparams := map[string][]string{\n\t\t\"uploadId\":   {m.UploadId},\n\t\t\"partNumber\": {strconv.FormatInt(int64(n), 10)},\n\t}\n\tfor attempt := attempts.Start(); attempt.Next(); {\n\t\t_, err := r.Seek(0, 0)\n\t\tif err != nil {\n\t\t\treturn Part{}, err\n\t\t}\n\t\treq := &request{\n\t\t\tmethod:  \"PUT\",\n\t\t\tbucket:  m.Bucket.Name,\n\t\t\tpath:    m.Key,\n\t\t\theaders: headers,\n\t\t\tparams:  params,\n\t\t\tpayload: r,\n\t\t}\n\t\terr = m.Bucket.S3.prepare(req)\n\t\tif err != nil {\n\t\t\treturn Part{}, err\n\t\t}\n\t\tresp, err := m.Bucket.S3.run(req, nil)\n\t\tif shouldRetry(err) && attempt.HasNext() {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn Part{}, err\n\t\t}\n\t\tetag := resp.Header.Get(\"ETag\")\n\t\tif etag == \"\" {\n\t\t\treturn Part{}, errors.New(\"part upload succeeded with no ETag\")\n\t\t}\n\t\treturn Part{n, etag, partSize}, nil\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) {\n\t_, err = r.Seek(0, 0)\n\tif err != nil {\n\t\treturn 0, \"\", \"\", err\n\t}\n\tdigest := md5.New()\n\tsize, err = io.Copy(digest, r)\n\tif err != nil {\n\t\treturn 0, \"\", \"\", err\n\t}\n\tsum := digest.Sum(nil)\n\tmd5hex = hex.EncodeToString(sum)\n\tmd5b64 = base64.StdEncoding.EncodeToString(sum)\n\treturn size, md5hex, md5b64, nil\n}\n\ntype Part struct {\n\tN    int `xml:\"PartNumber\"`\n\tETag string\n\tSize int64\n}\n\ntype partSlice []Part\n\nfunc (s partSlice) Len() int           { return len(s) }\nfunc (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N }\nfunc (s partSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }\n\ntype listPartsResp struct {\n\tNextPartNumberMarker string\n\tIsTruncated          bool\n\tPart                 []Part\n}\n\n// That's the default. 
Here just for testing.\nvar listPartsMax = 1000\n\n// ListParts returns the list of previously uploaded parts in m,\n// ordered by part number.\n//\n// See http://goo.gl/ePioY for details.\nfunc (m *Multi) ListParts() ([]Part, error) {\n\tparams := map[string][]string{\n\t\t\"uploadId\":  {m.UploadId},\n\t\t\"max-parts\": {strconv.FormatInt(int64(listPartsMax), 10)},\n\t}\n\tvar parts partSlice\n\tfor attempt := attempts.Start(); attempt.Next(); {\n\t\treq := &request{\n\t\t\tmethod: \"GET\",\n\t\t\tbucket: m.Bucket.Name,\n\t\t\tpath:   m.Key,\n\t\t\tparams: params,\n\t\t}\n\t\tvar resp listPartsResp\n\t\terr := m.Bucket.S3.query(req, &resp)\n\t\tif shouldRetry(err) && attempt.HasNext() {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparts = append(parts, resp.Part...)\n\t\tif !resp.IsTruncated {\n\t\t\tsort.Sort(parts)\n\t\t\treturn parts, nil\n\t\t}\n\t\tparams[\"part-number-marker\"] = []string{resp.NextPartNumberMarker}\n\t\tattempt = attempts.Start() // Last request worked.\n\t}\n\tpanic(\"unreachable\")\n}\n\ntype ReaderAtSeeker interface {\n\tio.ReaderAt\n\tio.ReadSeeker\n}\n\n// PutAll sends all of r via a multipart upload with parts no larger\n// than partSize bytes, which must be set to at least 5MB.\n// Parts previously uploaded are either reused if their checksum\n// and size match the new part, or otherwise overwritten with the\n// new content.\n// PutAll returns all the parts of m (reused or not).\nfunc (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) {\n\told, err := m.ListParts()\n\tif err != nil && !hasCode(err, \"NoSuchUpload\") {\n\t\treturn nil, err\n\t}\n\treuse := 0   // Index of next old part to consider reusing.\n\tcurrent := 1 // Part number of latest good part handled.\n\ttotalSize, err := r.Seek(0, 2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfirst := true // Must send at least one empty part if the file is empty.\n\tvar result []Part\nNextSection:\n\tfor offset := int64(0); offset 
< totalSize || first; offset += partSize {\n\t\tfirst = false\n\t\tif offset+partSize > totalSize {\n\t\t\tpartSize = totalSize - offset\n\t\t}\n\t\tsection := io.NewSectionReader(r, offset, partSize)\n\t\t_, md5hex, md5b64, err := seekerInfo(section)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor reuse < len(old) && old[reuse].N <= current {\n\t\t\t// Looks like this part was already sent.\n\t\t\tpart := &old[reuse]\n\t\t\tetag := `\"` + md5hex + `\"`\n\t\t\tif part.N == current && part.Size == partSize && part.ETag == etag {\n\t\t\t\t// Checksum matches. Reuse the old part.\n\t\t\t\tresult = append(result, *part)\n\t\t\t\tcurrent++\n\t\t\t\tcontinue NextSection\n\t\t\t}\n\t\t\treuse++\n\t\t}\n\n\t\t// Part wasn't found or doesn't match. Send it.\n\t\tpart, err := m.putPart(current, section, partSize, md5b64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresult = append(result, part)\n\t\tcurrent++\n\t}\n\treturn result, nil\n}\n\ntype completeUpload struct {\n\tXMLName xml.Name      `xml:\"CompleteMultipartUpload\"`\n\tParts   completeParts `xml:\"Part\"`\n}\n\ntype completePart struct {\n\tPartNumber int\n\tETag       string\n}\n\ntype completeParts []completePart\n\nfunc (p completeParts) Len() int           { return len(p) }\nfunc (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber }\nfunc (p completeParts) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }\n\n// Complete assembles the given previously uploaded parts into the\n// final object. 
This operation may take several minutes.\n//\n// See http://goo.gl/2Z7Tw for details.\nfunc (m *Multi) Complete(parts []Part) error {\n\tparams := map[string][]string{\n\t\t\"uploadId\": {m.UploadId},\n\t}\n\tc := completeUpload{}\n\tfor _, p := range parts {\n\t\tc.Parts = append(c.Parts, completePart{p.N, p.ETag})\n\t}\n\tsort.Sort(c.Parts)\n\tdata, err := xml.Marshal(&c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor attempt := attempts.Start(); attempt.Next(); {\n\t\treq := &request{\n\t\t\tmethod:  \"POST\",\n\t\t\tbucket:  m.Bucket.Name,\n\t\t\tpath:    m.Key,\n\t\t\tparams:  params,\n\t\t\tpayload: bytes.NewReader(data),\n\t\t}\n\t\terr := m.Bucket.S3.query(req, nil)\n\t\tif shouldRetry(err) && attempt.HasNext() {\n\t\t\tcontinue\n\t\t}\n\t\treturn err\n\t}\n\tpanic(\"unreachable\")\n}\n\n// Abort deletes an unifinished multipart upload and any previously\n// uploaded parts for it.\n//\n// After a multipart upload is aborted, no additional parts can be\n// uploaded using it. However, if any part uploads are currently in\n// progress, those part uploads might or might not succeed. As a result,\n// it might be necessary to abort a given multipart upload multiple\n// times in order to completely free all storage consumed by all parts.\n//\n// NOTE: If the described scenario happens to you, please report back to\n// the goamz authors with details. In the future such retrying should be\n// handled internally, but it's not clear what happens precisely (Is an\n// error returned? 
Is the issue completely undetectable?).\n//\n// See http://goo.gl/dnyJw for details.\nfunc (m *Multi) Abort() error {\n\tparams := map[string][]string{\n\t\t\"uploadId\": {m.UploadId},\n\t}\n\tfor attempt := attempts.Start(); attempt.Next(); {\n\t\treq := &request{\n\t\t\tmethod: \"DELETE\",\n\t\t\tbucket: m.Bucket.Name,\n\t\t\tpath:   m.Key,\n\t\t\tparams: params,\n\t\t}\n\t\terr := m.Bucket.S3.query(req, nil)\n\t\tif shouldRetry(err) && attempt.HasNext() {\n\t\t\tcontinue\n\t\t}\n\t\treturn err\n\t}\n\tpanic(\"unreachable\")\n}\n"
  },
  {
    "path": "vendor/github.com/mitchellh/goamz/s3/s3.go",
    "content": "//\n// goamz - Go packages to interact with the Amazon Web Services.\n//\n//   https://wiki.ubuntu.com/goamz\n//\n// Copyright (c) 2011 Canonical Ltd.\n//\n// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>\n//\n\npackage s3\n\nimport (\n\t\"bytes\"\n\t\"crypto/md5\"\n\t\"encoding/base64\"\n\t\"encoding/xml\"\n\t\"fmt\"\n\t\"github.com/mitchellh/goamz/aws\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httputil\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst debug = false\n\n// The S3 type encapsulates operations with an S3 region.\ntype S3 struct {\n\taws.Auth\n\taws.Region\n\tHTTPClient func() *http.Client\n\n\tprivate byte // Reserve the right of using private data.\n}\n\n// The Bucket type encapsulates operations with an S3 bucket.\ntype Bucket struct {\n\t*S3\n\tName string\n}\n\n// The Owner type represents the owner of the object in an S3 bucket.\ntype Owner struct {\n\tID          string\n\tDisplayName string\n}\n\nvar attempts = aws.AttemptStrategy{\n\tMin:   5,\n\tTotal: 5 * time.Second,\n\tDelay: 200 * time.Millisecond,\n}\n\n// New creates a new S3.\nfunc New(auth aws.Auth, region aws.Region) *S3 {\n\treturn &S3{\n\t\tAuth:   auth,\n\t\tRegion: region,\n\t\tHTTPClient: func() *http.Client {\n\t\t\treturn http.DefaultClient\n\t\t},\n\t\tprivate: 0}\n}\n\n// Bucket returns a Bucket with the given name.\nfunc (s3 *S3) Bucket(name string) *Bucket {\n\tif s3.Region.S3BucketEndpoint != \"\" || s3.Region.S3LowercaseBucket {\n\t\tname = strings.ToLower(name)\n\t}\n\treturn &Bucket{s3, name}\n}\n\nvar createBucketConfiguration = `<CreateBucketConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\n  <LocationConstraint>%s</LocationConstraint>\n</CreateBucketConfiguration>`\n\n// locationConstraint returns an io.Reader specifying a LocationConstraint if\n// required for the region.\n//\n// See http://goo.gl/bh9Kq for details.\nfunc (s3 *S3) locationConstraint() 
io.Reader {\n\tconstraint := \"\"\n\tif s3.Region.S3LocationConstraint {\n\t\tconstraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name)\n\t}\n\treturn strings.NewReader(constraint)\n}\n\ntype ACL string\n\nconst (\n\tPrivate           = ACL(\"private\")\n\tPublicRead        = ACL(\"public-read\")\n\tPublicReadWrite   = ACL(\"public-read-write\")\n\tAuthenticatedRead = ACL(\"authenticated-read\")\n\tBucketOwnerRead   = ACL(\"bucket-owner-read\")\n\tBucketOwnerFull   = ACL(\"bucket-owner-full-control\")\n)\n\n// The ListBucketsResp type holds the results of a List buckets operation.\ntype ListBucketsResp struct {\n\tBuckets []Bucket `xml:\">Bucket\"`\n}\n\n// ListBuckets lists all buckets\n//\n// See: http://goo.gl/NqlyMN\nfunc (s3 *S3) ListBuckets() (result *ListBucketsResp, err error) {\n\treq := &request{\n\t\tpath: \"/\",\n\t}\n\tresult = &ListBucketsResp{}\n\tfor attempt := attempts.Start(); attempt.Next(); {\n\t\terr = s3.query(req, result)\n\t\tif !shouldRetry(err) {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// set S3 instance on buckets\n\tfor i := range result.Buckets {\n\t\tresult.Buckets[i].S3 = s3\n\t}\n\treturn result, nil\n}\n\n// PutBucket creates a new bucket.\n//\n// See http://goo.gl/ndjnR for details.\nfunc (b *Bucket) PutBucket(perm ACL) error {\n\theaders := map[string][]string{\n\t\t\"x-amz-acl\": {string(perm)},\n\t}\n\treq := &request{\n\t\tmethod:  \"PUT\",\n\t\tbucket:  b.Name,\n\t\tpath:    \"/\",\n\t\theaders: headers,\n\t\tpayload: b.locationConstraint(),\n\t}\n\treturn b.S3.query(req, nil)\n}\n\n// DelBucket removes an existing S3 bucket. 
All objects in the bucket must\n// be removed before the bucket itself can be removed.\n//\n// See http://goo.gl/GoBrY for details.\nfunc (b *Bucket) DelBucket() (err error) {\n\treq := &request{\n\t\tmethod: \"DELETE\",\n\t\tbucket: b.Name,\n\t\tpath:   \"/\",\n\t}\n\tfor attempt := attempts.Start(); attempt.Next(); {\n\t\terr = b.S3.query(req, nil)\n\t\tif !shouldRetry(err) {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn err\n}\n\n// Get retrieves an object from an S3 bucket.\n//\n// See http://goo.gl/isCO7 for details.\nfunc (b *Bucket) Get(path string) (data []byte, err error) {\n\tbody, err := b.GetReader(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata, err = ioutil.ReadAll(body)\n\tbody.Close()\n\treturn data, err\n}\n\n// GetReader retrieves an object from an S3 bucket.\n// It is the caller's responsibility to call Close on rc when\n// finished reading.\nfunc (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) {\n\tresp, err := b.GetResponse(path)\n\tif resp != nil {\n\t\treturn resp.Body, err\n\t}\n\treturn nil, err\n}\n\n// GetResponse retrieves an object from an S3 bucket returning the http response\n// It is the caller's responsibility to call Close on rc when\n// finished reading.\nfunc (b *Bucket) GetResponse(path string) (*http.Response, error) {\n\treturn b.getResponseParams(path, nil)\n}\n\n// GetTorrent retrieves an Torrent object from an S3 bucket an io.ReadCloser.\n// It is the caller's responsibility to call Close on rc when finished reading.\nfunc (b *Bucket) GetTorrentReader(path string) (io.ReadCloser, error) {\n\tresp, err := b.getResponseParams(path, url.Values{\"torrent\": {\"\"}})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.Body, nil\n}\n\n// GetTorrent retrieves an Torrent object from an S3, returning\n// the torrent as a []byte.\nfunc (b *Bucket) GetTorrent(path string) ([]byte, error) {\n\tbody, err := b.GetTorrentReader(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer body.Close()\n\n\treturn 
ioutil.ReadAll(body)\n}\n\nfunc (b *Bucket) getResponseParams(path string, params url.Values) (*http.Response, error) {\n\treq := &request{\n\t\tbucket: b.Name,\n\t\tpath:   path,\n\t\tparams: params,\n\t}\n\terr := b.S3.prepare(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor attempt := attempts.Start(); attempt.Next(); {\n\t\tresp, err := b.S3.run(req, nil)\n\t\tif shouldRetry(err) && attempt.HasNext() {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn resp, nil\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (b *Bucket) Head(path string) (*http.Response, error) {\n\treq := &request{\n\t\tmethod: \"HEAD\",\n\t\tbucket: b.Name,\n\t\tpath:   path,\n\t}\n\terr := b.S3.prepare(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor attempt := attempts.Start(); attempt.Next(); {\n\t\tresp, err := b.S3.run(req, nil)\n\t\tif shouldRetry(err) && attempt.HasNext() {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn resp, nil\n\t}\n\tpanic(\"unreachable\")\n}\n\n// Put inserts an object into the S3 bucket.\n//\n// See http://goo.gl/FEBPD for details.\nfunc (b *Bucket) Put(path string, data []byte, contType string, perm ACL) error {\n\tbody := bytes.NewBuffer(data)\n\treturn b.PutReader(path, body, int64(len(data)), contType, perm)\n}\n\n/*\nPutHeader - like Put, inserts an object into the S3 bucket.\nInstead of Content-Type string, pass in custom headers to override defaults.\n*/\nfunc (b *Bucket) PutHeader(path string, data []byte, customHeaders map[string][]string, perm ACL) error {\n\tbody := bytes.NewBuffer(data)\n\treturn b.PutReaderHeader(path, body, int64(len(data)), customHeaders, perm)\n}\n\n// PutReader inserts an object into the S3 bucket by consuming data\n// from r until EOF.\nfunc (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL) error {\n\theaders := map[string][]string{\n\t\t\"Content-Length\": {strconv.FormatInt(length, 
10)},\n\t\t\"Content-Type\":   {contType},\n\t\t\"x-amz-acl\":      {string(perm)},\n\t}\n\treq := &request{\n\t\tmethod:  \"PUT\",\n\t\tbucket:  b.Name,\n\t\tpath:    path,\n\t\theaders: headers,\n\t\tpayload: r,\n\t}\n\treturn b.S3.query(req, nil)\n}\n\n/*\nPutReaderHeader - like PutReader, inserts an object into S3 from a reader.\nInstead of Content-Type string, pass in custom headers to override defaults.\n*/\nfunc (b *Bucket) PutReaderHeader(path string, r io.Reader, length int64, customHeaders map[string][]string, perm ACL) error {\n\t// Default headers\n\theaders := map[string][]string{\n\t\t\"Content-Length\": {strconv.FormatInt(length, 10)},\n\t\t\"Content-Type\":   {\"application/text\"},\n\t\t\"x-amz-acl\":      {string(perm)},\n\t}\n\n\t// Override with custom headers\n\tfor key, value := range customHeaders {\n\t\theaders[key] = value\n\t}\n\n\treq := &request{\n\t\tmethod:  \"PUT\",\n\t\tbucket:  b.Name,\n\t\tpath:    path,\n\t\theaders: headers,\n\t\tpayload: r,\n\t}\n\treturn b.S3.query(req, nil)\n}\n\n/*\nCopy - copy objects inside bucket\n*/\nfunc (b *Bucket) Copy(oldPath, newPath string, perm ACL) error {\n\tif !strings.HasPrefix(oldPath, \"/\") {\n\t\toldPath = \"/\" + oldPath\n\t}\n\n\treq := &request{\n\t\tmethod: \"PUT\",\n\t\tbucket: b.Name,\n\t\tpath:   newPath,\n\t\theaders: map[string][]string{\n\t\t\t\"x-amz-copy-source\": {amazonEscape(\"/\" + b.Name + oldPath)},\n\t\t\t\"x-amz-acl\":         {string(perm)},\n\t\t},\n\t}\n\n\terr := b.S3.prepare(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor attempt := attempts.Start(); attempt.Next(); {\n\t\t_, err = b.S3.run(req, nil)\n\t\tif shouldRetry(err) && attempt.HasNext() {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\tpanic(\"unreachable\")\n}\n\n// Del removes an object from the S3 bucket.\n//\n// See http://goo.gl/APeTt for details.\nfunc (b *Bucket) Del(path string) error {\n\treq := &request{\n\t\tmethod: \"DELETE\",\n\t\tbucket: 
b.Name,\n\t\tpath:   path,\n\t}\n\treturn b.S3.query(req, nil)\n}\n\ntype Object struct {\n\tKey string\n}\n\ntype MultiObjectDeleteBody struct {\n\tXMLName xml.Name `xml:\"Delete\"`\n\tQuiet   bool\n\tObject  []Object\n}\n\nfunc base64md5(data []byte) string {\n\th := md5.New()\n\th.Write(data)\n\treturn base64.StdEncoding.EncodeToString(h.Sum(nil))\n}\n\n// MultiDel removes multiple objects from the S3 bucket efficiently.\n// A maximum of 1000 keys at once may be specified.\n//\n// See http://goo.gl/WvA5sj for details.\nfunc (b *Bucket) MultiDel(paths []string) error {\n\t// create XML payload\n\tv := MultiObjectDeleteBody{}\n\tv.Object = make([]Object, len(paths))\n\tfor i, path := range paths {\n\t\tv.Object[i] = Object{path}\n\t}\n\tdata, _ := xml.Marshal(v)\n\n\t// Content-MD5 is required\n\tmd5hash := base64md5(data)\n\treq := &request{\n\t\tmethod:  \"POST\",\n\t\tbucket:  b.Name,\n\t\tpath:    \"/\",\n\t\tparams:  url.Values{\"delete\": {\"\"}},\n\t\theaders: http.Header{\"Content-MD5\": {md5hash}},\n\t\tpayload: bytes.NewReader(data),\n\t}\n\n\treturn b.S3.query(req, nil)\n}\n\n// The ListResp type holds the results of a List bucket operation.\ntype ListResp struct {\n\tName       string\n\tPrefix     string\n\tDelimiter  string\n\tMarker     string\n\tNextMarker string\n\tMaxKeys    int\n\t// IsTruncated is true if the results have been truncated because\n\t// there are more keys and prefixes than can fit in MaxKeys.\n\t// N.B. 
this is the opposite sense to that documented (incorrectly) in\n\t// http://goo.gl/YjQTc\n\tIsTruncated    bool\n\tContents       []Key\n\tCommonPrefixes []string `xml:\">Prefix\"`\n}\n\n// The Key type represents an item stored in an S3 bucket.\ntype Key struct {\n\tKey          string\n\tLastModified string\n\tSize         int64\n\t// ETag gives the hex-encoded MD5 sum of the contents,\n\t// surrounded with double-quotes.\n\tETag         string\n\tStorageClass string\n\tOwner        Owner\n}\n\n// List returns information about objects in an S3 bucket.\n//\n// The prefix parameter limits the response to keys that begin with the\n// specified prefix.\n//\n// The delim parameter causes the response to group all of the keys that\n// share a common prefix up to the next delimiter in a single entry within\n// the CommonPrefixes field. You can use delimiters to separate a bucket\n// into different groupings of keys, similar to how folders would work.\n//\n// The marker parameter specifies the key to start with when listing objects\n// in a bucket. Amazon S3 lists objects in alphabetical order and\n// will return keys alphabetically greater than the marker.\n//\n// The max parameter specifies how many keys + common prefixes to return in\n// the response. 
The default is 1000.\n//\n// For example, given these keys in a bucket:\n//\n//     index.html\n//     index2.html\n//     photos/2006/January/sample.jpg\n//     photos/2006/February/sample2.jpg\n//     photos/2006/February/sample3.jpg\n//     photos/2006/February/sample4.jpg\n//\n// Listing this bucket with delimiter set to \"/\" would yield the\n// following result:\n//\n//     &ListResp{\n//         Name:      \"sample-bucket\",\n//         MaxKeys:   1000,\n//         Delimiter: \"/\",\n//         Contents:  []Key{\n//             {Key: \"index.html\", \"index2.html\"},\n//         },\n//         CommonPrefixes: []string{\n//             \"photos/\",\n//         },\n//     }\n//\n// Listing the same bucket with delimiter set to \"/\" and prefix set to\n// \"photos/2006/\" would yield the following result:\n//\n//     &ListResp{\n//         Name:      \"sample-bucket\",\n//         MaxKeys:   1000,\n//         Delimiter: \"/\",\n//         Prefix:    \"photos/2006/\",\n//         CommonPrefixes: []string{\n//             \"photos/2006/February/\",\n//             \"photos/2006/January/\",\n//         },\n//     }\n//\n// See http://goo.gl/YjQTc for details.\nfunc (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) {\n\tparams := map[string][]string{\n\t\t\"prefix\":    {prefix},\n\t\t\"delimiter\": {delim},\n\t\t\"marker\":    {marker},\n\t}\n\tif max != 0 {\n\t\tparams[\"max-keys\"] = []string{strconv.FormatInt(int64(max), 10)}\n\t}\n\treq := &request{\n\t\tbucket: b.Name,\n\t\tparams: params,\n\t}\n\tresult = &ListResp{}\n\tfor attempt := attempts.Start(); attempt.Next(); {\n\t\terr = b.S3.query(req, result)\n\t\tif !shouldRetry(err) {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result, nil\n}\n\n// Returns a mapping of all key names in this bucket to Key objects\nfunc (b *Bucket) GetBucketContents() (*map[string]Key, error) {\n\tbucket_contents := map[string]Key{}\n\tprefix := 
\"\"\n\tpath_separator := \"\"\n\tmarker := \"\"\n\tfor {\n\t\tcontents, err := b.List(prefix, path_separator, marker, 1000)\n\t\tif err != nil {\n\t\t\treturn &bucket_contents, err\n\t\t}\n\t\tlast_key := \"\"\n\t\tfor _, key := range contents.Contents {\n\t\t\tbucket_contents[key.Key] = key\n\t\t\tlast_key = key.Key\n\t\t}\n\t\tif contents.IsTruncated {\n\t\t\tmarker = contents.NextMarker\n\t\t\tif marker == \"\" {\n\t\t\t\t// From the s3 docs: If response does not include the\n\t\t\t\t// NextMarker and it is truncated, you can use the value of the\n\t\t\t\t// last Key in the response as the marker in the subsequent\n\t\t\t\t// request to get the next set of object keys.\n\t\t\t\tmarker = last_key\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn &bucket_contents, nil\n}\n\n// Get metadata from the key without returning the key content\nfunc (b *Bucket) GetKey(path string) (*Key, error) {\n\treq := &request{\n\t\tbucket: b.Name,\n\t\tpath:   path,\n\t\tmethod: \"HEAD\",\n\t}\n\terr := b.S3.prepare(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey := &Key{}\n\tfor attempt := attempts.Start(); attempt.Next(); {\n\t\tresp, err := b.S3.run(req, nil)\n\t\tif shouldRetry(err) && attempt.HasNext() {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tkey.Key = path\n\t\tkey.LastModified = resp.Header.Get(\"Last-Modified\")\n\t\tkey.ETag = resp.Header.Get(\"ETag\")\n\t\tcontentLength := resp.Header.Get(\"Content-Length\")\n\t\tsize, err := strconv.ParseInt(contentLength, 10, 64)\n\t\tif err != nil {\n\t\t\treturn key, fmt.Errorf(\"bad s3 content-length %v: %v\",\n\t\t\t\tcontentLength, err)\n\t\t}\n\t\tkey.Size = size\n\t\treturn key, nil\n\t}\n\tpanic(\"unreachable\")\n}\n\n// URL returns a non-signed URL that allows retriving the\n// object at path. 
It only works if the object is publicly\n// readable (see SignedURL).\nfunc (b *Bucket) URL(path string) string {\n\treq := &request{\n\t\tbucket: b.Name,\n\t\tpath:   path,\n\t}\n\terr := b.S3.prepare(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tu, err := req.url(true)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tu.RawQuery = \"\"\n\treturn u.String()\n}\n\n// SignedURL returns a signed URL that allows anyone holding the URL\n// to retrieve the object at path. The signature is valid until expires.\nfunc (b *Bucket) SignedURL(path string, expires time.Time) string {\n\treq := &request{\n\t\tbucket: b.Name,\n\t\tpath:   path,\n\t\tparams: url.Values{\"Expires\": {strconv.FormatInt(expires.Unix(), 10)}},\n\t}\n\terr := b.S3.prepare(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tu, err := req.url(true)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn u.String()\n}\n\ntype request struct {\n\tmethod   string\n\tbucket   string\n\tpath     string\n\tsignpath string\n\tparams   url.Values\n\theaders  http.Header\n\tbaseurl  string\n\tpayload  io.Reader\n\tprepared bool\n}\n\n// amazonShouldEscape returns true if byte should be escaped\nfunc amazonShouldEscape(c byte) bool {\n\treturn !((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') ||\n\t\t(c >= '0' && c <= '9') || c == '_' || c == '-' || c == '~' || c == '.' 
|| c == '/' || c == ':')\n}\n\n// amazonEscape does uri escaping exactly as Amazon does\nfunc amazonEscape(s string) string {\n\thexCount := 0\n\n\tfor i := 0; i < len(s); i++ {\n\t\tif amazonShouldEscape(s[i]) {\n\t\t\thexCount++\n\t\t}\n\t}\n\n\tif hexCount == 0 {\n\t\treturn s\n\t}\n\n\tt := make([]byte, len(s)+2*hexCount)\n\tj := 0\n\tfor i := 0; i < len(s); i++ {\n\t\tif c := s[i]; amazonShouldEscape(c) {\n\t\t\tt[j] = '%'\n\t\t\tt[j+1] = \"0123456789ABCDEF\"[c>>4]\n\t\t\tt[j+2] = \"0123456789ABCDEF\"[c&15]\n\t\t\tj += 3\n\t\t} else {\n\t\t\tt[j] = s[i]\n\t\t\tj++\n\t\t}\n\t}\n\treturn string(t)\n}\n\n// url returns url to resource, either full (with host/scheme) or\n// partial for HTTP request\nfunc (req *request) url(full bool) (*url.URL, error) {\n\tu, err := url.Parse(req.baseurl)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"bad S3 endpoint URL %q: %v\", req.baseurl, err)\n\t}\n\n\tu.Opaque = amazonEscape(req.path)\n\tif full {\n\t\tu.Opaque = \"//\" + u.Host + u.Opaque\n\t}\n\tu.RawQuery = req.params.Encode()\n\n\treturn u, nil\n}\n\n// query prepares and runs the req request.\n// If resp is not nil, the XML data contained in the response\n// body will be unmarshalled on it.\nfunc (s3 *S3) query(req *request, resp interface{}) error {\n\terr := s3.prepare(req)\n\tif err == nil {\n\t\tvar httpResponse *http.Response\n\t\thttpResponse, err = s3.run(req, resp)\n\t\tif resp == nil && httpResponse != nil {\n\t\t\thttpResponse.Body.Close()\n\t\t}\n\t}\n\treturn err\n}\n\n// prepare sets up req to be delivered to S3.\nfunc (s3 *S3) prepare(req *request) error {\n\tif !req.prepared {\n\t\treq.prepared = true\n\t\tif req.method == \"\" {\n\t\t\treq.method = \"GET\"\n\t\t}\n\t\t// Copy so they can be mutated without affecting on retries.\n\t\tparams := make(url.Values)\n\t\theaders := make(http.Header)\n\t\tfor k, v := range req.params {\n\t\t\tparams[k] = v\n\t\t}\n\t\tfor k, v := range req.headers {\n\t\t\theaders[k] = v\n\t\t}\n\t\treq.params = 
params\n\t\treq.headers = headers\n\t\tif !strings.HasPrefix(req.path, \"/\") {\n\t\t\treq.path = \"/\" + req.path\n\t\t}\n\t\treq.signpath = req.path\n\n\t\tif req.bucket != \"\" {\n\t\t\treq.baseurl = s3.Region.S3BucketEndpoint\n\t\t\tif req.baseurl == \"\" {\n\t\t\t\t// Use the path method to address the bucket.\n\t\t\t\treq.baseurl = s3.Region.S3Endpoint\n\t\t\t\treq.path = \"/\" + req.bucket + req.path\n\t\t\t} else {\n\t\t\t\t// Just in case, prevent injection.\n\t\t\t\tif strings.IndexAny(req.bucket, \"/:@\") >= 0 {\n\t\t\t\t\treturn fmt.Errorf(\"bad S3 bucket: %q\", req.bucket)\n\t\t\t\t}\n\t\t\t\treq.baseurl = strings.Replace(req.baseurl, \"${bucket}\", req.bucket, -1)\n\t\t\t}\n\t\t\treq.signpath = \"/\" + req.bucket + req.signpath\n\t\t} else {\n\t\t\treq.baseurl = s3.Region.S3Endpoint\n\t\t}\n\t}\n\n\t// Always sign again as it's not clear how far the\n\t// server has handled a previous attempt.\n\tu, err := url.Parse(req.baseurl)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"bad S3 endpoint URL %q: %v\", req.baseurl, err)\n\t}\n\treq.headers[\"Host\"] = []string{u.Host}\n\treq.headers[\"Date\"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)}\n\tsign(s3.Auth, req.method, amazonEscape(req.signpath), req.params, req.headers)\n\treturn nil\n}\n\n// run sends req and returns the http response from the server.\n// If resp is not nil, the XML data contained in the response\n// body will be unmarshalled on it.\nfunc (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) {\n\tif debug {\n\t\tlog.Printf(\"Running S3 request: %#v\", req)\n\t}\n\n\tu, err := req.url(false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\threq := http.Request{\n\t\tURL:        u,\n\t\tMethod:     req.method,\n\t\tProtoMajor: 1,\n\t\tProtoMinor: 1,\n\t\tClose:      true,\n\t\tHeader:     req.headers,\n\t}\n\n\tif v, ok := req.headers[\"Content-Length\"]; ok {\n\t\threq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64)\n\t\tdelete(req.headers, 
\"Content-Length\")\n\t}\n\tif req.payload != nil {\n\t\threq.Body = ioutil.NopCloser(req.payload)\n\t}\n\n\thresp, err := s3.HTTPClient().Do(&hreq)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif debug {\n\t\tdump, _ := httputil.DumpResponse(hresp, true)\n\t\tlog.Printf(\"} -> %s\\n\", dump)\n\t}\n\tif hresp.StatusCode != 200 && hresp.StatusCode != 204 {\n\t\tdefer hresp.Body.Close()\n\t\treturn nil, buildError(hresp)\n\t}\n\tif resp != nil {\n\t\terr = xml.NewDecoder(hresp.Body).Decode(resp)\n\t\thresp.Body.Close()\n\t}\n\treturn hresp, err\n}\n\n// Error represents an error in an operation with S3.\ntype Error struct {\n\tStatusCode int    // HTTP status code (200, 403, ...)\n\tCode       string // EC2 error code (\"UnsupportedOperation\", ...)\n\tMessage    string // The human-oriented error message\n\tBucketName string\n\tRequestId  string\n\tHostId     string\n}\n\nfunc (e *Error) Error() string {\n\treturn e.Message\n}\n\nfunc buildError(r *http.Response) error {\n\tif debug {\n\t\tlog.Printf(\"got error (status code %v)\", r.StatusCode)\n\t\tdata, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"\\tread error: %v\", err)\n\t\t} else {\n\t\t\tlog.Printf(\"\\tdata:\\n%s\\n\\n\", data)\n\t\t}\n\t\tr.Body = ioutil.NopCloser(bytes.NewBuffer(data))\n\t}\n\n\terr := Error{}\n\t// TODO return error if Unmarshal fails?\n\txml.NewDecoder(r.Body).Decode(&err)\n\tr.Body.Close()\n\terr.StatusCode = r.StatusCode\n\tif err.Message == \"\" {\n\t\terr.Message = r.Status\n\t}\n\tif debug {\n\t\tlog.Printf(\"err: %#v\\n\", err)\n\t}\n\treturn &err\n}\n\nfunc shouldRetry(err error) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\tswitch err {\n\tcase io.ErrUnexpectedEOF, io.EOF:\n\t\treturn true\n\t}\n\tswitch e := err.(type) {\n\tcase *net.DNSError:\n\t\treturn true\n\tcase *net.OpError:\n\t\tswitch e.Op {\n\t\tcase \"read\", \"write\":\n\t\t\treturn true\n\t\t}\n\tcase *Error:\n\t\tswitch e.Code {\n\t\tcase \"InternalError\", \"NoSuchUpload\", 
\"NoSuchBucket\":\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc hasCode(err error, code string) bool {\n\ts3err, ok := err.(*Error)\n\treturn ok && s3err.Code == code\n}\n"
  },
  {
    "path": "vendor/github.com/mitchellh/goamz/s3/s3test/server.go",
    "content": "package s3test\n\nimport (\n\t\"bytes\"\n\t\"crypto/md5\"\n\t\"encoding/hex\"\n\t\"encoding/xml\"\n\t\"fmt\"\n\t\"github.com/mitchellh/goamz/s3\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst debug = false\n\ntype s3Error struct {\n\tstatusCode int\n\tXMLName    struct{} `xml:\"Error\"`\n\tCode       string\n\tMessage    string\n\tBucketName string\n\tRequestId  string\n\tHostId     string\n}\n\ntype action struct {\n\tsrv   *Server\n\tw     http.ResponseWriter\n\treq   *http.Request\n\treqId string\n}\n\n// Config controls the internal behaviour of the Server. A nil config is the default\n// and behaves as if all configurations assume their default behaviour. Once passed\n// to NewServer, the configuration must not be modified.\ntype Config struct {\n\t// Send409Conflict controls how the Server will respond to calls to PUT on a\n\t// previously existing bucket. The default is false, and corresponds to the\n\t// us-east-1 s3 enpoint. 
Setting this value to true emulates the behaviour of\n\t// all other regions.\n\t// http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html\n\tSend409Conflict bool\n}\n\nfunc (c *Config) send409Conflict() bool {\n\tif c != nil {\n\t\treturn c.Send409Conflict\n\t}\n\treturn false\n}\n\n// Server is a fake S3 server for testing purposes.\n// All of the data for the server is kept in memory.\ntype Server struct {\n\turl      string\n\treqId    int\n\tlistener net.Listener\n\tmu       sync.Mutex\n\tbuckets  map[string]*bucket\n\tconfig   *Config\n}\n\ntype bucket struct {\n\tname    string\n\tacl     s3.ACL\n\tctime   time.Time\n\tobjects map[string]*object\n}\n\ntype object struct {\n\tname     string\n\tmtime    time.Time\n\tmeta     http.Header // metadata to return with requests.\n\tchecksum []byte      // also held as Content-MD5 in meta.\n\tdata     []byte\n}\n\n// A resource encapsulates the subject of an HTTP request.\n// The resource referred to may or may not exist\n// when the request is made.\ntype resource interface {\n\tput(a *action) interface{}\n\tget(a *action) interface{}\n\tpost(a *action) interface{}\n\tdelete(a *action) interface{}\n}\n\nfunc NewServer(config *Config) (*Server, error) {\n\tl, err := net.Listen(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot listen on localhost: %v\", err)\n\t}\n\tsrv := &Server{\n\t\tlistener: l,\n\t\turl:      \"http://\" + l.Addr().String(),\n\t\tbuckets:  make(map[string]*bucket),\n\t\tconfig:   config,\n\t}\n\tgo http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tsrv.serveHTTP(w, req)\n\t}))\n\treturn srv, nil\n}\n\n// Quit closes down the server.\nfunc (srv *Server) Quit() {\n\tsrv.listener.Close()\n}\n\n// URL returns a URL for the server.\nfunc (srv *Server) URL() string {\n\treturn srv.url\n}\n\nfunc fatalf(code int, codeStr string, errf string, a ...interface{}) {\n\tpanic(&s3Error{\n\t\tstatusCode: code,\n\t\tCode:       
codeStr,\n\t\tMessage:    fmt.Sprintf(errf, a...),\n\t})\n}\n\n// serveHTTP serves the S3 protocol.\nfunc (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) {\n\t// ignore error from ParseForm as it's usually spurious.\n\treq.ParseForm()\n\n\tsrv.mu.Lock()\n\tdefer srv.mu.Unlock()\n\n\tif debug {\n\t\tlog.Printf(\"s3test %q %q\", req.Method, req.URL)\n\t}\n\ta := &action{\n\t\tsrv:   srv,\n\t\tw:     w,\n\t\treq:   req,\n\t\treqId: fmt.Sprintf(\"%09X\", srv.reqId),\n\t}\n\tsrv.reqId++\n\n\tvar r resource\n\tdefer func() {\n\t\tswitch err := recover().(type) {\n\t\tcase *s3Error:\n\t\t\tswitch r := r.(type) {\n\t\t\tcase objectResource:\n\t\t\t\terr.BucketName = r.bucket.name\n\t\t\tcase bucketResource:\n\t\t\t\terr.BucketName = r.name\n\t\t\t}\n\t\t\terr.RequestId = a.reqId\n\t\t\t// TODO HostId\n\t\t\tw.Header().Set(\"Content-Type\", `xml version=\"1.0\" encoding=\"UTF-8\"`)\n\t\t\tw.WriteHeader(err.statusCode)\n\t\t\txmlMarshal(w, err)\n\t\tcase nil:\n\t\tdefault:\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\tr = srv.resourceForURL(req.URL)\n\n\tvar resp interface{}\n\tswitch req.Method {\n\tcase \"PUT\":\n\t\tresp = r.put(a)\n\tcase \"GET\", \"HEAD\":\n\t\tresp = r.get(a)\n\tcase \"DELETE\":\n\t\tresp = r.delete(a)\n\tcase \"POST\":\n\t\tresp = r.post(a)\n\tdefault:\n\t\tfatalf(400, \"MethodNotAllowed\", \"unknown http request method %q\", req.Method)\n\t}\n\tif resp != nil && req.Method != \"HEAD\" {\n\t\txmlMarshal(w, resp)\n\t}\n}\n\n// xmlMarshal is the same as xml.Marshal except that\n// it panics on error. 
The marshalling should not fail,\n// but we want to know if it does.\nfunc xmlMarshal(w io.Writer, x interface{}) {\n\tif err := xml.NewEncoder(w).Encode(x); err != nil {\n\t\tpanic(fmt.Errorf(\"error marshalling %#v: %v\", x, err))\n\t}\n}\n\n// In a fully implemented test server, each of these would have\n// its own resource type.\nvar unimplementedBucketResourceNames = map[string]bool{\n\t\"acl\":            true,\n\t\"lifecycle\":      true,\n\t\"policy\":         true,\n\t\"location\":       true,\n\t\"logging\":        true,\n\t\"notification\":   true,\n\t\"versions\":       true,\n\t\"requestPayment\": true,\n\t\"versioning\":     true,\n\t\"website\":        true,\n\t\"uploads\":        true,\n}\n\nvar unimplementedObjectResourceNames = map[string]bool{\n\t\"uploadId\": true,\n\t\"acl\":      true,\n\t\"torrent\":  true,\n\t\"uploads\":  true,\n}\n\nvar pathRegexp = regexp.MustCompile(\"/(([^/]+)(/(.*))?)?\")\n\n// resourceForURL returns a resource object for the given URL.\nfunc (srv *Server) resourceForURL(u *url.URL) (r resource) {\n\n\tif u.Path == \"/\" {\n\t\treturn serviceResource{\n\t\t\tbuckets: srv.buckets,\n\t\t}\n\t}\n\n\tm := pathRegexp.FindStringSubmatch(u.Path)\n\tif m == nil {\n\t\tfatalf(404, \"InvalidURI\", \"Couldn't parse the specified URI\")\n\t}\n\tbucketName := m[2]\n\tobjectName := m[4]\n\tif bucketName == \"\" {\n\t\treturn nullResource{} // root\n\t}\n\tb := bucketResource{\n\t\tname:   bucketName,\n\t\tbucket: srv.buckets[bucketName],\n\t}\n\tq := u.Query()\n\tif objectName == \"\" {\n\t\tfor name := range q {\n\t\t\tif unimplementedBucketResourceNames[name] {\n\t\t\t\treturn nullResource{}\n\t\t\t}\n\t\t}\n\t\treturn b\n\n\t}\n\tif b.bucket == nil {\n\t\tfatalf(404, \"NoSuchBucket\", \"The specified bucket does not exist\")\n\t}\n\tobjr := objectResource{\n\t\tname:    objectName,\n\t\tversion: q.Get(\"versionId\"),\n\t\tbucket:  b.bucket,\n\t}\n\tfor name := range q {\n\t\tif unimplementedObjectResourceNames[name] 
{\n\t\t\treturn nullResource{}\n\t\t}\n\t}\n\tif obj := objr.bucket.objects[objr.name]; obj != nil {\n\t\tobjr.object = obj\n\t}\n\treturn objr\n}\n\n// nullResource has error stubs for all resource methods.\ntype nullResource struct{}\n\nfunc notAllowed() interface{} {\n\tfatalf(400, \"MethodNotAllowed\", \"The specified method is not allowed against this resource\")\n\treturn nil\n}\n\nfunc (nullResource) put(a *action) interface{}    { return notAllowed() }\nfunc (nullResource) get(a *action) interface{}    { return notAllowed() }\nfunc (nullResource) post(a *action) interface{}   { return notAllowed() }\nfunc (nullResource) delete(a *action) interface{} { return notAllowed() }\n\nconst timeFormat = \"2006-01-02T15:04:05.000Z07:00\"\n\ntype serviceResource struct {\n\tbuckets map[string]*bucket\n}\n\nfunc (serviceResource) put(a *action) interface{}    { return notAllowed() }\nfunc (serviceResource) post(a *action) interface{}   { return notAllowed() }\nfunc (serviceResource) delete(a *action) interface{} { return notAllowed() }\n\n// GET on an s3 service lists the buckets.\n// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTServiceGET.html\nfunc (r serviceResource) get(a *action) interface{} {\n\ttype respBucket struct {\n\t\tName string\n\t}\n\n\ttype response struct {\n\t\tBuckets []respBucket `xml:\">Bucket\"`\n\t}\n\n\tresp := response{}\n\n\tfor _, bucketPtr := range r.buckets {\n\t\tbkt := respBucket{\n\t\t\tName: bucketPtr.name,\n\t\t}\n\t\tresp.Buckets = append(resp.Buckets, bkt)\n\t}\n\n\treturn &resp\n}\n\ntype bucketResource struct {\n\tname   string\n\tbucket *bucket // non-nil if the bucket already exists.\n}\n\n// GET on a bucket lists the objects in the bucket.\n// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html\nfunc (r bucketResource) get(a *action) interface{} {\n\tif r.bucket == nil {\n\t\tfatalf(404, \"NoSuchBucket\", \"The specified bucket does not exist\")\n\t}\n\tdelimiter := 
a.req.Form.Get(\"delimiter\")\n\tmarker := a.req.Form.Get(\"marker\")\n\tmaxKeys := -1\n\tif s := a.req.Form.Get(\"max-keys\"); s != \"\" {\n\t\ti, err := strconv.Atoi(s)\n\t\tif err != nil || i < 0 {\n\t\t\tfatalf(400, \"invalid value for max-keys: %q\", s)\n\t\t}\n\t\tmaxKeys = i\n\t}\n\tprefix := a.req.Form.Get(\"prefix\")\n\ta.w.Header().Set(\"Content-Type\", \"application/xml\")\n\n\tif a.req.Method == \"HEAD\" {\n\t\treturn nil\n\t}\n\n\tvar objs orderedObjects\n\n\t// first get all matching objects and arrange them in alphabetical order.\n\tfor name, obj := range r.bucket.objects {\n\t\tif strings.HasPrefix(name, prefix) {\n\t\t\tobjs = append(objs, obj)\n\t\t}\n\t}\n\tsort.Sort(objs)\n\n\tif maxKeys <= 0 {\n\t\tmaxKeys = 1000\n\t}\n\tresp := &s3.ListResp{\n\t\tName:      r.bucket.name,\n\t\tPrefix:    prefix,\n\t\tDelimiter: delimiter,\n\t\tMarker:    marker,\n\t\tMaxKeys:   maxKeys,\n\t}\n\n\tvar prefixes []string\n\tfor _, obj := range objs {\n\t\tif !strings.HasPrefix(obj.name, prefix) {\n\t\t\tcontinue\n\t\t}\n\t\tname := obj.name\n\t\tisPrefix := false\n\t\tif delimiter != \"\" {\n\t\t\tif i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 {\n\t\t\t\tname = obj.name[:len(prefix)+i+len(delimiter)]\n\t\t\t\tif prefixes != nil && prefixes[len(prefixes)-1] == name {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tisPrefix = true\n\t\t\t}\n\t\t}\n\t\tif name <= marker {\n\t\t\tcontinue\n\t\t}\n\t\tif len(resp.Contents)+len(prefixes) >= maxKeys {\n\t\t\tresp.IsTruncated = true\n\t\t\tbreak\n\t\t}\n\t\tif isPrefix {\n\t\t\tprefixes = append(prefixes, name)\n\t\t} else {\n\t\t\t// Contents contains only keys not found in CommonPrefixes\n\t\t\tresp.Contents = append(resp.Contents, obj.s3Key())\n\t\t}\n\t}\n\tresp.CommonPrefixes = prefixes\n\treturn resp\n}\n\n// orderedObjects holds a slice of objects that can be sorted\n// by name.\ntype orderedObjects []*object\n\nfunc (s orderedObjects) Len() int {\n\treturn len(s)\n}\nfunc (s orderedObjects) Swap(i, j 
int) {\n\ts[i], s[j] = s[j], s[i]\n}\nfunc (s orderedObjects) Less(i, j int) bool {\n\treturn s[i].name < s[j].name\n}\n\nfunc (obj *object) s3Key() s3.Key {\n\treturn s3.Key{\n\t\tKey:          obj.name,\n\t\tLastModified: obj.mtime.Format(timeFormat),\n\t\tSize:         int64(len(obj.data)),\n\t\tETag:         fmt.Sprintf(`\"%x\"`, obj.checksum),\n\t\t// TODO StorageClass\n\t\t// TODO Owner\n\t}\n}\n\n// DELETE on a bucket deletes the bucket if it's not empty.\nfunc (r bucketResource) delete(a *action) interface{} {\n\tb := r.bucket\n\tif b == nil {\n\t\tfatalf(404, \"NoSuchBucket\", \"The specified bucket does not exist\")\n\t}\n\tif len(b.objects) > 0 {\n\t\tfatalf(400, \"BucketNotEmpty\", \"The bucket you tried to delete is not empty\")\n\t}\n\tdelete(a.srv.buckets, b.name)\n\treturn nil\n}\n\n// PUT on a bucket creates the bucket.\n// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html\nfunc (r bucketResource) put(a *action) interface{} {\n\tvar created bool\n\tif r.bucket == nil {\n\t\tif !validBucketName(r.name) {\n\t\t\tfatalf(400, \"InvalidBucketName\", \"The specified bucket is not valid\")\n\t\t}\n\t\tif loc := locationConstraint(a); loc == \"\" {\n\t\t\tfatalf(400, \"InvalidRequets\", \"The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.\")\n\t\t}\n\t\t// TODO validate acl\n\t\tr.bucket = &bucket{\n\t\t\tname: r.name,\n\t\t\t// TODO default acl\n\t\t\tobjects: make(map[string]*object),\n\t\t}\n\t\ta.srv.buckets[r.name] = r.bucket\n\t\tcreated = true\n\t}\n\tif !created && a.srv.config.send409Conflict() {\n\t\tfatalf(409, \"BucketAlreadyOwnedByYou\", \"Your previous request to create the named bucket succeeded and you already own it.\")\n\t}\n\tr.bucket.acl = s3.ACL(a.req.Header.Get(\"x-amz-acl\"))\n\treturn nil\n}\n\nfunc (bucketResource) post(a *action) interface{} {\n\tfatalf(400, \"Method\", \"bucket POST method not available\")\n\treturn nil\n}\n\n// validBucketName 
returns whether name is a valid bucket name.\n// Here are the rules, from:\n// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html\n//\n// Can contain lowercase letters, numbers, periods (.), underscores (_),\n// and dashes (-). You can use uppercase letters for buckets only in the\n// US Standard region.\n//\n// Must start with a number or letter\n//\n// Must be between 3 and 255 characters long\n//\n// There's one extra rule (Must not be formatted as an IP address (e.g., 192.168.5.4)\n// but the real S3 server does not seem to check that rule, so we will not\n// check it either.\n//\nfunc validBucketName(name string) bool {\n\tif len(name) < 3 || len(name) > 255 {\n\t\treturn false\n\t}\n\tr := name[0]\n\tif !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') {\n\t\treturn false\n\t}\n\tfor _, r := range name {\n\t\tswitch {\n\t\tcase r >= '0' && r <= '9':\n\t\tcase r >= 'a' && r <= 'z':\n\t\tcase r == '_' || r == '-':\n\t\tcase r == '.':\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nvar responseParams = map[string]bool{\n\t\"content-type\":        true,\n\t\"content-language\":    true,\n\t\"expires\":             true,\n\t\"cache-control\":       true,\n\t\"content-disposition\": true,\n\t\"content-encoding\":    true,\n}\n\ntype objectResource struct {\n\tname    string\n\tversion string\n\tbucket  *bucket // always non-nil.\n\tobject  *object // may be nil.\n}\n\n// GET on an object gets the contents of the object.\n// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html\nfunc (objr objectResource) get(a *action) interface{} {\n\tobj := objr.object\n\tif obj == nil {\n\t\tfatalf(404, \"NoSuchKey\", \"The specified key does not exist.\")\n\t}\n\th := a.w.Header()\n\t// add metadata\n\tfor name, d := range obj.meta {\n\t\th[name] = d\n\t}\n\t// override header values in response to request parameters.\n\tfor name, vals := range a.req.Form {\n\t\tif strings.HasPrefix(name, \"response-\") 
{\n\t\t\tname = name[len(\"response-\"):]\n\t\t\tif !responseParams[name] {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\th.Set(name, vals[0])\n\t\t}\n\t}\n\tif r := a.req.Header.Get(\"Range\"); r != \"\" {\n\t\tfatalf(400, \"NotImplemented\", \"range unimplemented\")\n\t}\n\t// TODO Last-Modified-Since\n\t// TODO If-Modified-Since\n\t// TODO If-Unmodified-Since\n\t// TODO If-Match\n\t// TODO If-None-Match\n\t// TODO Connection: close ??\n\t// TODO x-amz-request-id\n\th.Set(\"Content-Length\", fmt.Sprint(len(obj.data)))\n\th.Set(\"ETag\", hex.EncodeToString(obj.checksum))\n\th.Set(\"Last-Modified\", obj.mtime.Format(time.RFC1123))\n\tif a.req.Method == \"HEAD\" {\n\t\treturn nil\n\t}\n\t// TODO avoid holding the lock when writing data.\n\t_, err := a.w.Write(obj.data)\n\tif err != nil {\n\t\t// we can't do much except just log the fact.\n\t\tlog.Printf(\"error writing data: %v\", err)\n\t}\n\treturn nil\n}\n\nvar metaHeaders = map[string]bool{\n\t\"Content-MD5\":         true,\n\t\"x-amz-acl\":           true,\n\t\"Content-Type\":        true,\n\t\"Content-Encoding\":    true,\n\t\"Content-Disposition\": true,\n}\n\n// PUT on an object creates the object.\nfunc (objr objectResource) put(a *action) interface{} {\n\t// TODO Cache-Control header\n\t// TODO Expires header\n\t// TODO x-amz-server-side-encryption\n\t// TODO x-amz-storage-class\n\n\t// TODO is this correct, or should we erase all previous metadata?\n\tobj := objr.object\n\tif obj == nil {\n\t\tobj = &object{\n\t\t\tname: objr.name,\n\t\t\tmeta: make(http.Header),\n\t\t}\n\t}\n\n\tvar expectHash []byte\n\tif c := a.req.Header.Get(\"Content-MD5\"); c != \"\" {\n\t\tvar err error\n\t\texpectHash, err = hex.DecodeString(c)\n\t\tif err != nil || len(expectHash) != md5.Size {\n\t\t\tfatalf(400, \"InvalidDigest\", \"The Content-MD5 you specified was invalid\")\n\t\t}\n\t}\n\tsum := md5.New()\n\t// TODO avoid holding lock while reading data.\n\tdata, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum))\n\tif err != nil 
{\n\t\tfatalf(400, \"TODO\", \"read error\")\n\t}\n\tgotHash := sum.Sum(nil)\n\tif expectHash != nil && bytes.Compare(gotHash, expectHash) != 0 {\n\t\tfatalf(400, \"BadDigest\", \"The Content-MD5 you specified did not match what we received\")\n\t}\n\tif a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength {\n\t\tfatalf(400, \"IncompleteBody\", \"You did not provide the number of bytes specified by the Content-Length HTTP header\")\n\t}\n\n\t// PUT request has been successful - save data and metadata\n\tfor key, values := range a.req.Header {\n\t\tkey = http.CanonicalHeaderKey(key)\n\t\tif metaHeaders[key] || strings.HasPrefix(key, \"X-Amz-Meta-\") {\n\t\t\tobj.meta[key] = values\n\t\t}\n\t}\n\tobj.data = data\n\tobj.checksum = gotHash\n\tobj.mtime = time.Now()\n\tobjr.bucket.objects[objr.name] = obj\n\treturn nil\n}\n\nfunc (objr objectResource) delete(a *action) interface{} {\n\tdelete(objr.bucket.objects, objr.name)\n\treturn nil\n}\n\nfunc (objr objectResource) post(a *action) interface{} {\n\tfatalf(400, \"MethodNotAllowed\", \"The specified method is not allowed against this resource\")\n\treturn nil\n}\n\ntype CreateBucketConfiguration struct {\n\tLocationConstraint string\n}\n\n// locationConstraint parses the <CreateBucketConfiguration /> request body (if present).\n// If there is no body, an empty string will be returned.\nfunc locationConstraint(a *action) string {\n\tvar body bytes.Buffer\n\tif _, err := io.Copy(&body, a.req.Body); err != nil {\n\t\tfatalf(400, \"InvalidRequest\", err.Error())\n\t}\n\tif body.Len() == 0 {\n\t\treturn \"\"\n\t}\n\tvar loc CreateBucketConfiguration\n\tif err := xml.NewDecoder(&body).Decode(&loc); err != nil {\n\t\tfatalf(400, \"InvalidRequest\", err.Error())\n\t}\n\treturn loc.LocationConstraint\n}\n"
  },
  {
    "path": "vendor/github.com/mitchellh/goamz/s3/sign.go",
    "content": "package s3\n\nimport (\n\t\"crypto/hmac\"\n\t\"crypto/sha1\"\n\t\"encoding/base64\"\n\t\"log\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/mitchellh/goamz/aws\"\n)\n\nvar b64 = base64.StdEncoding\n\n// ----------------------------------------------------------------------------\n// S3 signing (http://goo.gl/G1LrK)\n\nvar s3ParamsToSign = map[string]bool{\n\t\"acl\":                          true,\n\t\"delete\":                       true,\n\t\"location\":                     true,\n\t\"logging\":                      true,\n\t\"notification\":                 true,\n\t\"partNumber\":                   true,\n\t\"policy\":                       true,\n\t\"requestPayment\":               true,\n\t\"torrent\":                      true,\n\t\"uploadId\":                     true,\n\t\"uploads\":                      true,\n\t\"versionId\":                    true,\n\t\"versioning\":                   true,\n\t\"versions\":                     true,\n\t\"response-content-type\":        true,\n\t\"response-content-language\":    true,\n\t\"response-expires\":             true,\n\t\"response-cache-control\":       true,\n\t\"response-content-disposition\": true,\n\t\"response-content-encoding\":    true,\n}\n\nfunc sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) {\n\tvar md5, ctype, date, xamz string\n\tvar xamzDate bool\n\tvar sarray []string\n\n\t// add security token\n\tif auth.Token != \"\" {\n\t\theaders[\"x-amz-security-token\"] = []string{auth.Token}\n\t}\n\n\tif auth.SecretKey == \"\" {\n\t\t// no auth secret; skip signing, e.g. 
for public read-only buckets.\n\t\treturn\n\t}\n\n\tfor k, v := range headers {\n\t\tk = strings.ToLower(k)\n\t\tswitch k {\n\t\tcase \"content-md5\":\n\t\t\tmd5 = v[0]\n\t\tcase \"content-type\":\n\t\t\tctype = v[0]\n\t\tcase \"date\":\n\t\t\tif !xamzDate {\n\t\t\t\tdate = v[0]\n\t\t\t}\n\t\tdefault:\n\t\t\tif strings.HasPrefix(k, \"x-amz-\") {\n\t\t\t\tvall := strings.Join(v, \",\")\n\t\t\t\tsarray = append(sarray, k+\":\"+vall)\n\t\t\t\tif k == \"x-amz-date\" {\n\t\t\t\t\txamzDate = true\n\t\t\t\t\tdate = \"\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif len(sarray) > 0 {\n\t\tsort.StringSlice(sarray).Sort()\n\t\txamz = strings.Join(sarray, \"\\n\") + \"\\n\"\n\t}\n\n\texpires := false\n\tif v, ok := params[\"Expires\"]; ok {\n\t\t// Query string request authentication alternative.\n\t\texpires = true\n\t\tdate = v[0]\n\t\tparams[\"AWSAccessKeyId\"] = []string{auth.AccessKey}\n\t}\n\n\tsarray = sarray[0:0]\n\tfor k, v := range params {\n\t\tif s3ParamsToSign[k] {\n\t\t\tfor _, vi := range v {\n\t\t\t\tif vi == \"\" {\n\t\t\t\t\tsarray = append(sarray, k)\n\t\t\t\t} else {\n\t\t\t\t\t// \"When signing you do not encode these values.\"\n\t\t\t\t\tsarray = append(sarray, k+\"=\"+vi)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif len(sarray) > 0 {\n\t\tsort.StringSlice(sarray).Sort()\n\t\tcanonicalPath = canonicalPath + \"?\" + strings.Join(sarray, \"&\")\n\t}\n\n\tpayload := method + \"\\n\" + md5 + \"\\n\" + ctype + \"\\n\" + date + \"\\n\" + xamz + canonicalPath\n\thash := hmac.New(sha1.New, []byte(auth.SecretKey))\n\thash.Write([]byte(payload))\n\tsignature := make([]byte, b64.EncodedLen(hash.Size()))\n\tb64.Encode(signature, hash.Sum(nil))\n\n\tif expires {\n\t\tparams[\"Signature\"] = []string{string(signature)}\n\t} else {\n\t\theaders[\"Authorization\"] = []string{\"AWS \" + auth.AccessKey + \":\" + string(signature)}\n\t}\n\n\tif debug {\n\t\tlog.Printf(\"Signature payload: %q\", payload)\n\t\tlog.Printf(\"Signature: %q\", signature)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/trustmaster/go-aspell/README.md",
    "content": "# Aspell library bindings for Go\n\nGNU Aspell is a spell checking tool written in C/C++. This package provides simplified Aspell bindings for Go.\nIt uses UTF-8 by default and encapsulates some Aspell internals.\n\n## Getting started\n\nFirst make sure aspell library and headers are installed on your system. On Debian/Ubuntu you could install it this way:\n\n```\nsudo apt-get install aspell libaspell-dev\n```\n\nIt you need some more dictionaries you can install them like this:\n\n```\nsudo apt-get install aspell-ua aspell-se\n```\n\nThen you can install the package using the Go tool:\n\n```\ngo get github.com/trustmaster/go-aspell\n```\n\n## Usage\n\nHere is a simple spell checker program using the aspell package:\n\n```go\npackage main\n\nimport (\n\t\"github.com/trustmaster/go-aspell\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n)\n\nfunc main() {\n\t// Get a word from cmd line arguments\n\tif len(os.Args) != 2 {\n\t\tfmt.Print(\"Usage: aspell_example word\\n\")\n\t\treturn\n\t}\n\tword := os.Args[1]\n\n\t// Initialize the speller\n\tspeller, err := aspell.NewSpeller(map[string]string{\n\t\t\"lang\": \"en_US\",\n\t})\n\tif err != nil {\n\t\tfmt.Errorf(\"Error: %s\", err.Error())\n\t\treturn\n\t}\n\tdefer speller.Delete()\n\n\t// Check and suggest\n\tif speller.Check(word) {\n\t\tfmt.Print(\"OK\\n\")\n\t} else {\n\t\tfmt.Printf(\"Incorrect word, suggestions: %s\\n\", strings.Join(speller.Suggest(word), \", \"))\n\t}\n}\n```\n\nFor more information see [aspell_test.go](https://github.com/trustmaster/go-aspell/blob/master/aspell_test.go) file and use the godoc tool:\n\n```\ngodoc github.com/trustmaster/go-aspell\n```\n\n## License\n\nCopyright (c) 2012, Vladimir Sibirov\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n\n1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "vendor/github.com/trustmaster/go-aspell/aspell.go",
    "content": "// Package aspell provides simplified bindings to GNU Aspell spell checking library.\npackage aspell\n\n/*\n#cgo LDFLAGS: -laspell\n#include <stdlib.h>\n#include \"aspell.h\"\n*/\nimport \"C\"\n\nimport (\n\t\"errors\"\n\t\"unsafe\"\n)\n\n// Speller is a type that encapsulates Aspell internals.\ntype Speller struct {\n\tconfig  *C.AspellConfig\n\tspeller *C.AspellSpeller\n}\n\n// NewSpeller creates a new speller instance with configuration options\n// given as a map. At least the language option should be specified\n// (see example below).\n//\n// The returned value is a speller struct. The second returned value\n// contains error data in case of error or nil if NewSpeller succeeded.\n//\n// In the most common case you would like to pass the language option\n// which accepts two letter ISO 639 language code and an optional\n// two letter ISO 3166 country code after a dash or underscore:\n//\n// \t\topts := map[string] string {\n// \t\t\t\"lang\": \"en_US\", // American English\n// \t\t}\n// \t\tspeller, err := aspell.NewSpeller(opts)\n// \t\tif err != nil {\n// \t\t\tpanic(\"Aspell error: \" + err.Error())\n// \t\t}\n// \t\tdefer speller.Delete()\n//\n// See available options at http://aspell.net/man-html/The-Options.html\n//\n// Because aspell package is a binding to Aspell C library, memory\n// allocated by NewSpeller() call has to be disposed explicitly.\n// This is why the above example contains a deferred call to Delete().\nfunc NewSpeller(options map[string]string) (Speller, error) {\n\tvar s Speller\n\n\t// Pass configuration options\n\ts.config = C.new_aspell_config()\n\tif _, hasEnc := options[\"encoding\"]; !hasEnc {\n\t\toptions[\"encoding\"] = \"utf-8\"\n\t}\n\tfor k, v := range options {\n\t\toptName := C.CString(k)\n\t\toptValue := C.CString(v)\n\t\tres := C.aspell_config_replace(s.config, optName, optValue)\n\t\tC.free(unsafe.Pointer(optName))\n\t\tC.free(unsafe.Pointer(optValue))\n\t\tif res == 0 {\n\t\t\tmsg := 
C.aspell_config_error_message(s.config)\n\t\t\terr := errors.New(C.GoString(msg))\n\t\t\tC.free(unsafe.Pointer(msg))\n\t\t\treturn s, err\n\t\t}\n\t}\n\n\t// Attempt to initialize the speller\n\tvar probErr *C.AspellCanHaveError\n\tprobErr = C.new_aspell_speller(s.config)\n\tC.delete_aspell_config(s.config)\n\tif C.aspell_error_number(probErr) != 0 {\n\t\tmsg := C.aspell_error_message(probErr)\n\t\terr := errors.New(C.GoString(msg))\n\t\tC.free(unsafe.Pointer(msg))\n\t\tC.delete_aspell_can_have_error(probErr)\n\t\treturn s, err\n\t}\n\n\t// Successful speller initialization\n\ts.speller = C.to_aspell_speller(probErr)\n\ts.config = C.aspell_speller_config(s.speller)\n\n\treturn s, nil\n}\n\n// Config returns current Aspell configuration option value for the speller.\n// It returns nil in case of error.\n// See available options at http://aspell.net/man-html/The-Options.html\nfunc (s Speller) Config(name string) string {\n\tcName := C.CString(name)\n\tcVal := C.aspell_config_retrieve(s.config, cName)\n\tval := C.GoString(cVal)\n\tC.free(unsafe.Pointer(cName))\n\tC.free(unsafe.Pointer(cVal))\n\treturn val\n}\n\n// Check looks the word up in the spell checker dictionary\n// and returns true if the word is found there or false\n// otherwise.\nfunc (s Speller) Check(word string) bool {\n\tcword := C.CString(word)\n\tdefer C.free(unsafe.Pointer(cword))\n\tres := C.aspell_speller_check(s.speller, cword, -1)\n\treturn res != 0\n}\n\n// Delete frees memory allocated by Aspell for the speller.\nfunc (s Speller) Delete() {\n\t// For some reason this breaks everything\n\t// if s.speller != nil {\n\t// \tC.delete_aspell_speller(s.speller)\n\t// }\n\t// s.config is deleted automatically\n}\n\n// wordListToSlice converts Aspell word list into Go slice.\nfunc wordListToSlice(list *C.AspellWordList) []string {\n\tif list == nil {\n\t\treturn nil\n\t}\n\tcount := int(C.aspell_word_list_size(list))\n\tresult := make([]string, count)\n\n\telems := 
C.aspell_word_list_elements(list)\n\tfor i := 0; i < count; i++ {\n\t\tword := C.aspell_string_enumeration_next(elems)\n\t\tif word == nil {\n\t\t\tbreak\n\t\t}\n\t\tresult[i] = C.GoString(word)\n\t}\n\tC.delete_aspell_string_enumeration(elems)\n\n\treturn result\n}\n\n// Suggest returns a slice of possible suggestions for the given word.\n// Nil is returned on error.\nfunc (s Speller) Suggest(word string) []string {\n\tcword := C.CString(word)\n\tdefer C.free(unsafe.Pointer(cword))\n\tsuggestions := C.aspell_speller_suggest(s.speller, cword, -1)\n\treturn wordListToSlice(suggestions)\n}\n\n// Replace saves a replacement pair to the spell checker so that it would\n// get higher probability on next Suggest call.\n// Returns true on success or false on error.\nfunc (s Speller) Replace(misspelled, correct string) bool {\n\tcmis := C.CString(misspelled)\n\tdefer C.free(unsafe.Pointer(cmis))\n\tccor := C.CString(correct)\n\tdefer C.free(unsafe.Pointer(ccor))\n\n\tret := C.aspell_speller_store_replacement(s.speller, cmis, -1, ccor, -1)\n\n\treturn ret != -1\n}\n\n// MainWordList returns the main word list used by the speller.\nfunc (s Speller) MainWordList() ([]string, error) {\n\tlist := C.aspell_speller_main_word_list(s.speller)\n\tif list == nil {\n\t\treturn nil, errors.New(\"Failed getting the main word list\")\n\t}\n\treturn wordListToSlice(list), nil\n}\n\n// Dict represents Aspell dictionary info.\ntype Dict struct {\n\tname   string\n\tcode   string\n\tjargon string\n\tsize   string\n\tmodule string\n}\n\n// Dicts returns the list of available aspell dictionaries.\nfunc Dicts() []Dict {\n\tconfig := C.new_aspell_config()\n\tdlist := C.get_aspell_dict_info_list(config)\n\tC.delete_aspell_config(config)\n\n\tcount := int(C.aspell_dict_info_list_size(dlist))\n\tresult := make([]Dict, count)\n\n\tdels := C.aspell_dict_info_list_elements(dlist)\n\tfor i := 0; i < count; i++ {\n\t\tentry := C.aspell_dict_info_enumeration_next(dels)\n\t\tif entry == nil 
{\n\t\t\tbreak\n\t\t}\n\t\tresult[i] = Dict{\n\t\t\tname:   C.GoString(entry.name),\n\t\t\tcode:   C.GoString(entry.code),\n\t\t\tjargon: C.GoString(entry.jargon),\n\t\t\tsize:   C.GoString(entry.size_str),\n\t\t\tmodule: C.GoString(entry.module.name),\n\t\t}\n\t}\n\tC.delete_aspell_dict_info_enumeration(dels)\n\n\treturn result\n}\n"
  },
  {
    "path": "vendor/github.com/vaughan0/go-ini/LICENSE",
    "content": "Copyright (c) 2013 Vaughan Newton\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\npersons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the\nSoftware.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\nWARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\nOTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "vendor/github.com/vaughan0/go-ini/README.md",
    "content": "go-ini\n======\n\nINI parsing library for Go (golang).\n\nView the API documentation [here](http://godoc.org/github.com/vaughan0/go-ini).\n\nUsage\n-----\n\nParse an INI file:\n\n```go\nimport \"github.com/vaughan0/go-ini\"\n\nfile, err := ini.LoadFile(\"myfile.ini\")\n```\n\nGet data from the parsed file:\n\n```go\nname, ok := file.Get(\"person\", \"name\")\nif !ok {\n  panic(\"'name' variable missing from 'person' section\")\n}\n```\n\nIterate through values in a section:\n\n```go\nfor key, value := range file[\"mysection\"] {\n  fmt.Printf(\"%s => %s\\n\", key, value)\n}\n```\n\nIterate through sections in a file:\n\n```go\nfor name, section := range file {\n  fmt.Printf(\"Section name: %s\\n\", name)\n}\n```\n\nFile Format\n-----------\n\nINI files are parsed by go-ini line-by-line. Each line may be one of the following:\n\n  * A section definition: [section-name]\n  * A property: key = value\n  * A comment: #blahblah _or_ ;blahblah\n  * Blank. The line will be ignored.\n\nProperties defined before any section headers are placed in the default section, which has\nthe empty string as it's key.\n\nExample:\n\n```ini\n# I am a comment\n; So am I!\n\n[apples]\ncolour = red or green\nshape = applish\n\n[oranges]\nshape = square\ncolour = blue\n```\n"
  },
  {
    "path": "vendor/github.com/vaughan0/go-ini/ini.go",
    "content": "// Package ini provides functions for parsing INI configuration files.\npackage ini\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar (\n\tsectionRegex = regexp.MustCompile(`^\\[(.*)\\]$`)\n\tassignRegex  = regexp.MustCompile(`^([^=]+)=(.*)$`)\n)\n\n// ErrSyntax is returned when there is a syntax error in an INI file.\ntype ErrSyntax struct {\n\tLine   int\n\tSource string // The contents of the erroneous line, without leading or trailing whitespace\n}\n\nfunc (e ErrSyntax) Error() string {\n\treturn fmt.Sprintf(\"invalid INI syntax on line %d: %s\", e.Line, e.Source)\n}\n\n// A File represents a parsed INI file.\ntype File map[string]Section\n\n// A Section represents a single section of an INI file.\ntype Section map[string]string\n\n// Returns a named Section. A Section will be created if one does not already exist for the given name.\nfunc (f File) Section(name string) Section {\n\tsection := f[name]\n\tif section == nil {\n\t\tsection = make(Section)\n\t\tf[name] = section\n\t}\n\treturn section\n}\n\n// Looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup.\nfunc (f File) Get(section, key string) (value string, ok bool) {\n\tif s := f[section]; s != nil {\n\t\tvalue, ok = s[key]\n\t}\n\treturn\n}\n\n// Loads INI data from a reader and stores the data in the File.\nfunc (f File) Load(in io.Reader) (err error) {\n\tbufin, ok := in.(*bufio.Reader)\n\tif !ok {\n\t\tbufin = bufio.NewReader(in)\n\t}\n\treturn parseFile(bufin, f)\n}\n\n// Loads INI data from a named file and stores the data in the File.\nfunc (f File) LoadFile(file string) (err error) {\n\tin, err := os.Open(file)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer in.Close()\n\treturn f.Load(in)\n}\n\nfunc parseFile(in *bufio.Reader, file File) (err error) {\n\tsection := \"\"\n\tlineNum := 0\n\tfor done := false; !done; {\n\t\tvar line string\n\t\tif line, err = in.ReadString('\\n'); 
err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tdone = true\n\t\t\t} else {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tlineNum++\n\t\tline = strings.TrimSpace(line)\n\t\tif len(line) == 0 {\n\t\t\t// Skip blank lines\n\t\t\tcontinue\n\t\t}\n\t\tif line[0] == ';' || line[0] == '#' {\n\t\t\t// Skip comments\n\t\t\tcontinue\n\t\t}\n\n\t\tif groups := assignRegex.FindStringSubmatch(line); groups != nil {\n\t\t\tkey, val := groups[1], groups[2]\n\t\t\tkey, val = strings.TrimSpace(key), strings.TrimSpace(val)\n\t\t\tfile.Section(section)[key] = val\n\t\t} else if groups := sectionRegex.FindStringSubmatch(line); groups != nil {\n\t\t\tname := strings.TrimSpace(groups[1])\n\t\t\tsection = name\n\t\t\t// Create the section if it does not exist\n\t\t\tfile.Section(section)\n\t\t} else {\n\t\t\treturn ErrSyntax{lineNum, line}\n\t\t}\n\n\t}\n\treturn nil\n}\n\n// Loads and returns a File from a reader.\nfunc Load(in io.Reader) (File, error) {\n\tfile := make(File)\n\terr := file.Load(in)\n\treturn file, err\n}\n\n// Loads and returns an INI File from a file on disk.\nfunc LoadFile(filename string) (File, error) {\n\tfile := make(File)\n\terr := file.LoadFile(filename)\n\treturn file, err\n}\n"
  },
  {
    "path": "vendor/github.com/vaughan0/go-ini/test.ini",
    "content": "[default]\nstuff = things\n"
  },
  {
    "path": "vendor/golang.org/x/crypto/LICENSE",
    "content": "Copyright (c) 2009 The Go Authors. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n   * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n   * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n   * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "vendor/golang.org/x/crypto/PATENTS",
    "content": "Additional IP Rights Grant (Patents)\n\n\"This implementation\" means the copyrightable works distributed by\nGoogle as part of the Go project.\n\nGoogle hereby grants to You a perpetual, worldwide, non-exclusive,\nno-charge, royalty-free, irrevocable (except as stated in this section)\npatent license to make, have made, use, offer to sell, sell, import,\ntransfer and otherwise run, modify and propagate the contents of this\nimplementation of Go, where such license applies only to those patent\nclaims, both currently owned or controlled by Google and acquired in\nthe future, licensable by Google that are necessarily infringed by this\nimplementation of Go.  This grant does not include claims that would be\ninfringed only as a consequence of further modification of this\nimplementation.  If you or your agent or exclusive licensee institute or\norder or agree to the institution of patent litigation against any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging\nthat this implementation of Go or any code incorporated within this\nimplementation of Go constitutes direct or contributory patent\ninfringement, or inducement of patent infringement, then any patent\nrights granted to you under this License for this implementation of Go\nshall terminate as of the date such litigation is filed.\n"
  },
  {
    "path": "vendor/golang.org/x/crypto/ssh/terminal/terminal.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage terminal\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"sync\"\n\t\"unicode/utf8\"\n)\n\n// EscapeCodes contains escape sequences that can be written to the terminal in\n// order to achieve different styles of text.\ntype EscapeCodes struct {\n\t// Foreground colors\n\tBlack, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte\n\n\t// Reset all attributes\n\tReset []byte\n}\n\nvar vt100EscapeCodes = EscapeCodes{\n\tBlack:   []byte{keyEscape, '[', '3', '0', 'm'},\n\tRed:     []byte{keyEscape, '[', '3', '1', 'm'},\n\tGreen:   []byte{keyEscape, '[', '3', '2', 'm'},\n\tYellow:  []byte{keyEscape, '[', '3', '3', 'm'},\n\tBlue:    []byte{keyEscape, '[', '3', '4', 'm'},\n\tMagenta: []byte{keyEscape, '[', '3', '5', 'm'},\n\tCyan:    []byte{keyEscape, '[', '3', '6', 'm'},\n\tWhite:   []byte{keyEscape, '[', '3', '7', 'm'},\n\n\tReset: []byte{keyEscape, '[', '0', 'm'},\n}\n\n// Terminal contains the state for running a VT100 terminal that is capable of\n// reading lines of input.\ntype Terminal struct {\n\t// AutoCompleteCallback, if non-null, is called for each keypress with\n\t// the full input line and the current position of the cursor (in\n\t// bytes, as an index into |line|). If it returns ok=false, the key\n\t// press is processed normally. 
Otherwise it returns a replacement line\n\t// and the new cursor position.\n\tAutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool)\n\n\t// Escape contains a pointer to the escape codes for this terminal.\n\t// It's always a valid pointer, although the escape codes themselves\n\t// may be empty if the terminal doesn't support them.\n\tEscape *EscapeCodes\n\n\t// lock protects the terminal and the state in this object from\n\t// concurrent processing of a key press and a Write() call.\n\tlock sync.Mutex\n\n\tc      io.ReadWriter\n\tprompt []rune\n\n\t// line is the current line being entered.\n\tline []rune\n\t// pos is the logical position of the cursor in line\n\tpos int\n\t// echo is true if local echo is enabled\n\techo bool\n\t// pasteActive is true iff there is a bracketed paste operation in\n\t// progress.\n\tpasteActive bool\n\n\t// cursorX contains the current X value of the cursor where the left\n\t// edge is 0. cursorY contains the row number where the first row of\n\t// the current line is 0.\n\tcursorX, cursorY int\n\t// maxLine is the greatest value of cursorY so far.\n\tmaxLine int\n\n\ttermWidth, termHeight int\n\n\t// outBuf contains the terminal data to be sent.\n\toutBuf []byte\n\t// remainder contains the remainder of any partial key sequences after\n\t// a read. It aliases into inBuf.\n\tremainder []byte\n\tinBuf     [256]byte\n\n\t// history contains previously entered commands so that they can be\n\t// accessed with the up and down keys.\n\thistory stRingBuffer\n\t// historyIndex stores the currently accessed history entry, where zero\n\t// means the immediately previous entry.\n\thistoryIndex int\n\t// When navigating up and down the history it's possible to return to\n\t// the incomplete, initial line. That value is stored in\n\t// historyPending.\n\thistoryPending string\n}\n\n// NewTerminal runs a VT100 terminal on the given ReadWriter. 
If the ReadWriter is\n// a local terminal, that terminal must first have been put into raw mode.\n// prompt is a string that is written at the start of each input line (i.e.\n// \"> \").\nfunc NewTerminal(c io.ReadWriter, prompt string) *Terminal {\n\treturn &Terminal{\n\t\tEscape:       &vt100EscapeCodes,\n\t\tc:            c,\n\t\tprompt:       []rune(prompt),\n\t\ttermWidth:    80,\n\t\ttermHeight:   24,\n\t\techo:         true,\n\t\thistoryIndex: -1,\n\t}\n}\n\nconst (\n\tkeyCtrlD     = 4\n\tkeyCtrlU     = 21\n\tkeyEnter     = '\\r'\n\tkeyEscape    = 27\n\tkeyBackspace = 127\n\tkeyUnknown   = 0xd800 /* UTF-16 surrogate area */ + iota\n\tkeyUp\n\tkeyDown\n\tkeyLeft\n\tkeyRight\n\tkeyAltLeft\n\tkeyAltRight\n\tkeyHome\n\tkeyEnd\n\tkeyDeleteWord\n\tkeyDeleteLine\n\tkeyClearScreen\n\tkeyPasteStart\n\tkeyPasteEnd\n)\n\nvar pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'}\nvar pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'}\n\n// bytesToKey tries to parse a key sequence from b. If successful, it returns\n// the key and the remainder of the input. 
Otherwise it returns utf8.RuneError.\nfunc bytesToKey(b []byte, pasteActive bool) (rune, []byte) {\n\tif len(b) == 0 {\n\t\treturn utf8.RuneError, nil\n\t}\n\n\tif !pasteActive {\n\t\tswitch b[0] {\n\t\tcase 1: // ^A\n\t\t\treturn keyHome, b[1:]\n\t\tcase 5: // ^E\n\t\t\treturn keyEnd, b[1:]\n\t\tcase 8: // ^H\n\t\t\treturn keyBackspace, b[1:]\n\t\tcase 11: // ^K\n\t\t\treturn keyDeleteLine, b[1:]\n\t\tcase 12: // ^L\n\t\t\treturn keyClearScreen, b[1:]\n\t\tcase 23: // ^W\n\t\t\treturn keyDeleteWord, b[1:]\n\t\t}\n\t}\n\n\tif b[0] != keyEscape {\n\t\tif !utf8.FullRune(b) {\n\t\t\treturn utf8.RuneError, b\n\t\t}\n\t\tr, l := utf8.DecodeRune(b)\n\t\treturn r, b[l:]\n\t}\n\n\tif !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' {\n\t\tswitch b[2] {\n\t\tcase 'A':\n\t\t\treturn keyUp, b[3:]\n\t\tcase 'B':\n\t\t\treturn keyDown, b[3:]\n\t\tcase 'C':\n\t\t\treturn keyRight, b[3:]\n\t\tcase 'D':\n\t\t\treturn keyLeft, b[3:]\n\t\tcase 'H':\n\t\t\treturn keyHome, b[3:]\n\t\tcase 'F':\n\t\t\treturn keyEnd, b[3:]\n\t\t}\n\t}\n\n\tif !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' {\n\t\tswitch b[5] {\n\t\tcase 'C':\n\t\t\treturn keyAltRight, b[6:]\n\t\tcase 'D':\n\t\t\treturn keyAltLeft, b[6:]\n\t\t}\n\t}\n\n\tif !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) {\n\t\treturn keyPasteStart, b[6:]\n\t}\n\n\tif pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) {\n\t\treturn keyPasteEnd, b[6:]\n\t}\n\n\t// If we get here then we have a key that we don't recognise, or a\n\t// partial sequence. 
It's not clear how one should find the end of a\n\t// sequence without knowing them all, but it seems that [a-zA-Z~] only\n\t// appears at the end of a sequence.\n\tfor i, c := range b[0:] {\n\t\tif c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' {\n\t\t\treturn keyUnknown, b[i+1:]\n\t\t}\n\t}\n\n\treturn utf8.RuneError, b\n}\n\n// queue appends data to the end of t.outBuf\nfunc (t *Terminal) queue(data []rune) {\n\tt.outBuf = append(t.outBuf, []byte(string(data))...)\n}\n\nvar eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'}\nvar space = []rune{' '}\n\nfunc isPrintable(key rune) bool {\n\tisInSurrogateArea := key >= 0xd800 && key <= 0xdbff\n\treturn key >= 32 && !isInSurrogateArea\n}\n\n// moveCursorToPos appends data to t.outBuf which will move the cursor to the\n// given, logical position in the text.\nfunc (t *Terminal) moveCursorToPos(pos int) {\n\tif !t.echo {\n\t\treturn\n\t}\n\n\tx := visualLength(t.prompt) + pos\n\ty := x / t.termWidth\n\tx = x % t.termWidth\n\n\tup := 0\n\tif y < t.cursorY {\n\t\tup = t.cursorY - y\n\t}\n\n\tdown := 0\n\tif y > t.cursorY {\n\t\tdown = y - t.cursorY\n\t}\n\n\tleft := 0\n\tif x < t.cursorX {\n\t\tleft = t.cursorX - x\n\t}\n\n\tright := 0\n\tif x > t.cursorX {\n\t\tright = x - t.cursorX\n\t}\n\n\tt.cursorX = x\n\tt.cursorY = y\n\tt.move(up, down, left, right)\n}\n\nfunc (t *Terminal) move(up, down, left, right int) {\n\tmovement := make([]rune, 3*(up+down+left+right))\n\tm := movement\n\tfor i := 0; i < up; i++ {\n\t\tm[0] = keyEscape\n\t\tm[1] = '['\n\t\tm[2] = 'A'\n\t\tm = m[3:]\n\t}\n\tfor i := 0; i < down; i++ {\n\t\tm[0] = keyEscape\n\t\tm[1] = '['\n\t\tm[2] = 'B'\n\t\tm = m[3:]\n\t}\n\tfor i := 0; i < left; i++ {\n\t\tm[0] = keyEscape\n\t\tm[1] = '['\n\t\tm[2] = 'D'\n\t\tm = m[3:]\n\t}\n\tfor i := 0; i < right; i++ {\n\t\tm[0] = keyEscape\n\t\tm[1] = '['\n\t\tm[2] = 'C'\n\t\tm = m[3:]\n\t}\n\n\tt.queue(movement)\n}\n\nfunc (t *Terminal) clearLineToRight() {\n\top := []rune{keyEscape, '[', 
'K'}\n\tt.queue(op)\n}\n\nconst maxLineLength = 4096\n\nfunc (t *Terminal) setLine(newLine []rune, newPos int) {\n\tif t.echo {\n\t\tt.moveCursorToPos(0)\n\t\tt.writeLine(newLine)\n\t\tfor i := len(newLine); i < len(t.line); i++ {\n\t\t\tt.writeLine(space)\n\t\t}\n\t\tt.moveCursorToPos(newPos)\n\t}\n\tt.line = newLine\n\tt.pos = newPos\n}\n\nfunc (t *Terminal) advanceCursor(places int) {\n\tt.cursorX += places\n\tt.cursorY += t.cursorX / t.termWidth\n\tif t.cursorY > t.maxLine {\n\t\tt.maxLine = t.cursorY\n\t}\n\tt.cursorX = t.cursorX % t.termWidth\n\n\tif places > 0 && t.cursorX == 0 {\n\t\t// Normally terminals will advance the current position\n\t\t// when writing a character. But that doesn't happen\n\t\t// for the last character in a line. However, when\n\t\t// writing a character (except a new line) that causes\n\t\t// a line wrap, the position will be advanced two\n\t\t// places.\n\t\t//\n\t\t// So, if we are stopping at the end of a line, we\n\t\t// need to write a newline so that our cursor can be\n\t\t// advanced to the next line.\n\t\tt.outBuf = append(t.outBuf, '\\n')\n\t}\n}\n\nfunc (t *Terminal) eraseNPreviousChars(n int) {\n\tif n == 0 {\n\t\treturn\n\t}\n\n\tif t.pos < n {\n\t\tn = t.pos\n\t}\n\tt.pos -= n\n\tt.moveCursorToPos(t.pos)\n\n\tcopy(t.line[t.pos:], t.line[n+t.pos:])\n\tt.line = t.line[:len(t.line)-n]\n\tif t.echo {\n\t\tt.writeLine(t.line[t.pos:])\n\t\tfor i := 0; i < n; i++ {\n\t\t\tt.queue(space)\n\t\t}\n\t\tt.advanceCursor(n)\n\t\tt.moveCursorToPos(t.pos)\n\t}\n}\n\n// countToLeftWord returns then number of characters from the cursor to the\n// start of the previous word.\nfunc (t *Terminal) countToLeftWord() int {\n\tif t.pos == 0 {\n\t\treturn 0\n\t}\n\n\tpos := t.pos - 1\n\tfor pos > 0 {\n\t\tif t.line[pos] != ' ' {\n\t\t\tbreak\n\t\t}\n\t\tpos--\n\t}\n\tfor pos > 0 {\n\t\tif t.line[pos] == ' ' {\n\t\t\tpos++\n\t\t\tbreak\n\t\t}\n\t\tpos--\n\t}\n\n\treturn t.pos - pos\n}\n\n// countToRightWord returns then number of characters from 
the cursor to the\n// start of the next word.\nfunc (t *Terminal) countToRightWord() int {\n\tpos := t.pos\n\tfor pos < len(t.line) {\n\t\tif t.line[pos] == ' ' {\n\t\t\tbreak\n\t\t}\n\t\tpos++\n\t}\n\tfor pos < len(t.line) {\n\t\tif t.line[pos] != ' ' {\n\t\t\tbreak\n\t\t}\n\t\tpos++\n\t}\n\treturn pos - t.pos\n}\n\n// visualLength returns the number of visible glyphs in s.\nfunc visualLength(runes []rune) int {\n\tinEscapeSeq := false\n\tlength := 0\n\n\tfor _, r := range runes {\n\t\tswitch {\n\t\tcase inEscapeSeq:\n\t\t\tif (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') {\n\t\t\t\tinEscapeSeq = false\n\t\t\t}\n\t\tcase r == '\\x1b':\n\t\t\tinEscapeSeq = true\n\t\tdefault:\n\t\t\tlength++\n\t\t}\n\t}\n\n\treturn length\n}\n\n// handleKey processes the given key and, optionally, returns a line of text\n// that the user has entered.\nfunc (t *Terminal) handleKey(key rune) (line string, ok bool) {\n\tif t.pasteActive && key != keyEnter {\n\t\tt.addKeyToLine(key)\n\t\treturn\n\t}\n\n\tswitch key {\n\tcase keyBackspace:\n\t\tif t.pos == 0 {\n\t\t\treturn\n\t\t}\n\t\tt.eraseNPreviousChars(1)\n\tcase keyAltLeft:\n\t\t// move left by a word.\n\t\tt.pos -= t.countToLeftWord()\n\t\tt.moveCursorToPos(t.pos)\n\tcase keyAltRight:\n\t\t// move right by a word.\n\t\tt.pos += t.countToRightWord()\n\t\tt.moveCursorToPos(t.pos)\n\tcase keyLeft:\n\t\tif t.pos == 0 {\n\t\t\treturn\n\t\t}\n\t\tt.pos--\n\t\tt.moveCursorToPos(t.pos)\n\tcase keyRight:\n\t\tif t.pos == len(t.line) {\n\t\t\treturn\n\t\t}\n\t\tt.pos++\n\t\tt.moveCursorToPos(t.pos)\n\tcase keyHome:\n\t\tif t.pos == 0 {\n\t\t\treturn\n\t\t}\n\t\tt.pos = 0\n\t\tt.moveCursorToPos(t.pos)\n\tcase keyEnd:\n\t\tif t.pos == len(t.line) {\n\t\t\treturn\n\t\t}\n\t\tt.pos = len(t.line)\n\t\tt.moveCursorToPos(t.pos)\n\tcase keyUp:\n\t\tentry, ok := t.history.NthPreviousEntry(t.historyIndex + 1)\n\t\tif !ok {\n\t\t\treturn \"\", false\n\t\t}\n\t\tif t.historyIndex == -1 {\n\t\t\tt.historyPending = 
string(t.line)\n\t\t}\n\t\tt.historyIndex++\n\t\trunes := []rune(entry)\n\t\tt.setLine(runes, len(runes))\n\tcase keyDown:\n\t\tswitch t.historyIndex {\n\t\tcase -1:\n\t\t\treturn\n\t\tcase 0:\n\t\t\trunes := []rune(t.historyPending)\n\t\t\tt.setLine(runes, len(runes))\n\t\t\tt.historyIndex--\n\t\tdefault:\n\t\t\tentry, ok := t.history.NthPreviousEntry(t.historyIndex - 1)\n\t\t\tif ok {\n\t\t\t\tt.historyIndex--\n\t\t\t\trunes := []rune(entry)\n\t\t\t\tt.setLine(runes, len(runes))\n\t\t\t}\n\t\t}\n\tcase keyEnter:\n\t\tt.moveCursorToPos(len(t.line))\n\t\tt.queue([]rune(\"\\r\\n\"))\n\t\tline = string(t.line)\n\t\tok = true\n\t\tt.line = t.line[:0]\n\t\tt.pos = 0\n\t\tt.cursorX = 0\n\t\tt.cursorY = 0\n\t\tt.maxLine = 0\n\tcase keyDeleteWord:\n\t\t// Delete zero or more spaces and then one or more characters.\n\t\tt.eraseNPreviousChars(t.countToLeftWord())\n\tcase keyDeleteLine:\n\t\t// Delete everything from the current cursor position to the\n\t\t// end of line.\n\t\tfor i := t.pos; i < len(t.line); i++ {\n\t\t\tt.queue(space)\n\t\t\tt.advanceCursor(1)\n\t\t}\n\t\tt.line = t.line[:t.pos]\n\t\tt.moveCursorToPos(t.pos)\n\tcase keyCtrlD:\n\t\t// Erase the character under the current position.\n\t\t// The EOF case when the line is empty is handled in\n\t\t// readLine().\n\t\tif t.pos < len(t.line) {\n\t\t\tt.pos++\n\t\t\tt.eraseNPreviousChars(1)\n\t\t}\n\tcase keyCtrlU:\n\t\tt.eraseNPreviousChars(t.pos)\n\tcase keyClearScreen:\n\t\t// Erases the screen and moves the cursor to the home position.\n\t\tt.queue([]rune(\"\\x1b[2J\\x1b[H\"))\n\t\tt.queue(t.prompt)\n\t\tt.cursorX, t.cursorY = 0, 0\n\t\tt.advanceCursor(visualLength(t.prompt))\n\t\tt.setLine(t.line, t.pos)\n\tdefault:\n\t\tif t.AutoCompleteCallback != nil {\n\t\t\tprefix := string(t.line[:t.pos])\n\t\t\tsuffix := string(t.line[t.pos:])\n\n\t\t\tt.lock.Unlock()\n\t\t\tnewLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key)\n\t\t\tt.lock.Lock()\n\n\t\t\tif completeOk 
{\n\t\t\t\tt.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos]))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif !isPrintable(key) {\n\t\t\treturn\n\t\t}\n\t\tif len(t.line) == maxLineLength {\n\t\t\treturn\n\t\t}\n\t\tt.addKeyToLine(key)\n\t}\n\treturn\n}\n\n// addKeyToLine inserts the given key at the current position in the current\n// line.\nfunc (t *Terminal) addKeyToLine(key rune) {\n\tif len(t.line) == cap(t.line) {\n\t\tnewLine := make([]rune, len(t.line), 2*(1+len(t.line)))\n\t\tcopy(newLine, t.line)\n\t\tt.line = newLine\n\t}\n\tt.line = t.line[:len(t.line)+1]\n\tcopy(t.line[t.pos+1:], t.line[t.pos:])\n\tt.line[t.pos] = key\n\tif t.echo {\n\t\tt.writeLine(t.line[t.pos:])\n\t}\n\tt.pos++\n\tt.moveCursorToPos(t.pos)\n}\n\nfunc (t *Terminal) writeLine(line []rune) {\n\tfor len(line) != 0 {\n\t\tremainingOnLine := t.termWidth - t.cursorX\n\t\ttodo := len(line)\n\t\tif todo > remainingOnLine {\n\t\t\ttodo = remainingOnLine\n\t\t}\n\t\tt.queue(line[:todo])\n\t\tt.advanceCursor(visualLength(line[:todo]))\n\t\tline = line[todo:]\n\t}\n}\n\nfunc (t *Terminal) Write(buf []byte) (n int, err error) {\n\tt.lock.Lock()\n\tdefer t.lock.Unlock()\n\n\tif t.cursorX == 0 && t.cursorY == 0 {\n\t\t// This is the easy case: there's nothing on the screen that we\n\t\t// have to move out of the way.\n\t\treturn t.c.Write(buf)\n\t}\n\n\t// We have a prompt and possibly user input on the screen. 
We\n\t// have to clear it first.\n\tt.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */)\n\tt.cursorX = 0\n\tt.clearLineToRight()\n\n\tfor t.cursorY > 0 {\n\t\tt.move(1 /* up */, 0, 0, 0)\n\t\tt.cursorY--\n\t\tt.clearLineToRight()\n\t}\n\n\tif _, err = t.c.Write(t.outBuf); err != nil {\n\t\treturn\n\t}\n\tt.outBuf = t.outBuf[:0]\n\n\tif n, err = t.c.Write(buf); err != nil {\n\t\treturn\n\t}\n\n\tt.writeLine(t.prompt)\n\tif t.echo {\n\t\tt.writeLine(t.line)\n\t}\n\n\tt.moveCursorToPos(t.pos)\n\n\tif _, err = t.c.Write(t.outBuf); err != nil {\n\t\treturn\n\t}\n\tt.outBuf = t.outBuf[:0]\n\treturn\n}\n\n// ReadPassword temporarily changes the prompt and reads a password, without\n// echo, from the terminal.\nfunc (t *Terminal) ReadPassword(prompt string) (line string, err error) {\n\tt.lock.Lock()\n\tdefer t.lock.Unlock()\n\n\toldPrompt := t.prompt\n\tt.prompt = []rune(prompt)\n\tt.echo = false\n\n\tline, err = t.readLine()\n\n\tt.prompt = oldPrompt\n\tt.echo = true\n\n\treturn\n}\n\n// ReadLine returns a line of input from the terminal.\nfunc (t *Terminal) ReadLine() (line string, err error) {\n\tt.lock.Lock()\n\tdefer t.lock.Unlock()\n\n\treturn t.readLine()\n}\n\nfunc (t *Terminal) readLine() (line string, err error) {\n\t// t.lock must be held at this point\n\n\tif t.cursorX == 0 && t.cursorY == 0 {\n\t\tt.writeLine(t.prompt)\n\t\tt.c.Write(t.outBuf)\n\t\tt.outBuf = t.outBuf[:0]\n\t}\n\n\tlineIsPasted := t.pasteActive\n\n\tfor {\n\t\trest := t.remainder\n\t\tlineOk := false\n\t\tfor !lineOk {\n\t\t\tvar key rune\n\t\t\tkey, rest = bytesToKey(rest, t.pasteActive)\n\t\t\tif key == utf8.RuneError {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !t.pasteActive {\n\t\t\t\tif key == keyCtrlD {\n\t\t\t\t\tif len(t.line) == 0 {\n\t\t\t\t\t\treturn \"\", io.EOF\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif key == keyPasteStart {\n\t\t\t\t\tt.pasteActive = true\n\t\t\t\t\tif len(t.line) == 0 {\n\t\t\t\t\t\tlineIsPasted = true\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t} 
else if key == keyPasteEnd {\n\t\t\t\tt.pasteActive = false\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !t.pasteActive {\n\t\t\t\tlineIsPasted = false\n\t\t\t}\n\t\t\tline, lineOk = t.handleKey(key)\n\t\t}\n\t\tif len(rest) > 0 {\n\t\t\tn := copy(t.inBuf[:], rest)\n\t\t\tt.remainder = t.inBuf[:n]\n\t\t} else {\n\t\t\tt.remainder = nil\n\t\t}\n\t\tt.c.Write(t.outBuf)\n\t\tt.outBuf = t.outBuf[:0]\n\t\tif lineOk {\n\t\t\tif t.echo {\n\t\t\t\tt.historyIndex = -1\n\t\t\t\tt.history.Add(line)\n\t\t\t}\n\t\t\tif lineIsPasted {\n\t\t\t\terr = ErrPasteIndicator\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t// t.remainder is a slice at the beginning of t.inBuf\n\t\t// containing a partial key sequence\n\t\treadBuf := t.inBuf[len(t.remainder):]\n\t\tvar n int\n\n\t\tt.lock.Unlock()\n\t\tn, err = t.c.Read(readBuf)\n\t\tt.lock.Lock()\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tt.remainder = t.inBuf[:n+len(t.remainder)]\n\t}\n\n\tpanic(\"unreachable\") // for Go 1.0.\n}\n\n// SetPrompt sets the prompt to be used when reading subsequent lines.\nfunc (t *Terminal) SetPrompt(prompt string) {\n\tt.lock.Lock()\n\tdefer t.lock.Unlock()\n\n\tt.prompt = []rune(prompt)\n}\n\nfunc (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) {\n\t// Move cursor to column zero at the start of the line.\n\tt.move(t.cursorY, 0, t.cursorX, 0)\n\tt.cursorX, t.cursorY = 0, 0\n\tt.clearLineToRight()\n\tfor t.cursorY < numPrevLines {\n\t\t// Move down a line\n\t\tt.move(0, 1, 0, 0)\n\t\tt.cursorY++\n\t\tt.clearLineToRight()\n\t}\n\t// Move back to beginning.\n\tt.move(t.cursorY, 0, 0, 0)\n\tt.cursorX, t.cursorY = 0, 0\n\n\tt.queue(t.prompt)\n\tt.advanceCursor(visualLength(t.prompt))\n\tt.writeLine(t.line)\n\tt.moveCursorToPos(t.pos)\n}\n\nfunc (t *Terminal) SetSize(width, height int) error {\n\tt.lock.Lock()\n\tdefer t.lock.Unlock()\n\n\tif width == 0 {\n\t\twidth = 1\n\t}\n\n\toldWidth := t.termWidth\n\tt.termWidth, t.termHeight = width, height\n\n\tswitch {\n\tcase width == oldWidth:\n\t\t// If the 
width didn't change then nothing else needs to be\n\t\t// done.\n\t\treturn nil\n\tcase len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0:\n\t\t// If there is nothing on current line and no prompt printed,\n\t\t// just do nothing\n\t\treturn nil\n\tcase width < oldWidth:\n\t\t// Some terminals (e.g. xterm) will truncate lines that were\n\t\t// too long when shinking. Others, (e.g. gnome-terminal) will\n\t\t// attempt to wrap them. For the former, repainting t.maxLine\n\t\t// works great, but that behaviour goes badly wrong in the case\n\t\t// of the latter because they have doubled every full line.\n\n\t\t// We assume that we are working on a terminal that wraps lines\n\t\t// and adjust the cursor position based on every previous line\n\t\t// wrapping and turning into two. This causes the prompt on\n\t\t// xterms to move upwards, which isn't great, but it avoids a\n\t\t// huge mess with gnome-terminal.\n\t\tif t.cursorX >= t.termWidth {\n\t\t\tt.cursorX = t.termWidth - 1\n\t\t}\n\t\tt.cursorY *= 2\n\t\tt.clearAndRepaintLinePlusNPrevious(t.maxLine * 2)\n\tcase width > oldWidth:\n\t\t// If the terminal expands then our position calculations will\n\t\t// be wrong in the future because we think the cursor is\n\t\t// |t.pos| chars into the string, but there will be a gap at\n\t\t// the end of any wrapped line.\n\t\t//\n\t\t// But the position will actually be correct until we move, so\n\t\t// we can move back to the beginning and repaint everything.\n\t\tt.clearAndRepaintLinePlusNPrevious(t.maxLine)\n\t}\n\n\t_, err := t.c.Write(t.outBuf)\n\tt.outBuf = t.outBuf[:0]\n\treturn err\n}\n\ntype pasteIndicatorError struct{}\n\nfunc (pasteIndicatorError) Error() string {\n\treturn \"terminal: ErrPasteIndicator not correctly handled\"\n}\n\n// ErrPasteIndicator may be returned from ReadLine as the error, in addition\n// to valid line data. It indicates that bracketed paste mode is enabled and\n// that the returned line consists only of pasted data. 
Programs may wish to\n// interpret pasted data more literally than typed data.\nvar ErrPasteIndicator = pasteIndicatorError{}\n\n// SetBracketedPasteMode requests that the terminal bracket paste operations\n// with markers. Not all terminals support this but, if it is supported, then\n// enabling this mode will stop any autocomplete callback from running due to\n// pastes. Additionally, any lines that are completely pasted will be returned\n// from ReadLine with the error set to ErrPasteIndicator.\nfunc (t *Terminal) SetBracketedPasteMode(on bool) {\n\tif on {\n\t\tio.WriteString(t.c, \"\\x1b[?2004h\")\n\t} else {\n\t\tio.WriteString(t.c, \"\\x1b[?2004l\")\n\t}\n}\n\n// stRingBuffer is a ring buffer of strings.\ntype stRingBuffer struct {\n\t// entries contains max elements.\n\tentries []string\n\tmax     int\n\t// head contains the index of the element most recently added to the ring.\n\thead int\n\t// size contains the number of elements in the ring.\n\tsize int\n}\n\nfunc (s *stRingBuffer) Add(a string) {\n\tif s.entries == nil {\n\t\tconst defaultNumEntries = 100\n\t\ts.entries = make([]string, defaultNumEntries)\n\t\ts.max = defaultNumEntries\n\t}\n\n\ts.head = (s.head + 1) % s.max\n\ts.entries[s.head] = a\n\tif s.size < s.max {\n\t\ts.size++\n\t}\n}\n\n// NthPreviousEntry returns the value passed to the nth previous call to Add.\n// If n is zero then the immediately prior value is returned, if one, then the\n// next most recent, and so on. If such an element doesn't exist then ok is\n// false.\nfunc (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {\n\tif n >= s.size {\n\t\treturn \"\", false\n\t}\n\tindex := s.head - n\n\tif index < 0 {\n\t\tindex += s.max\n\t}\n\treturn s.entries[index], true\n}\n"
  },
  {
    "path": "vendor/golang.org/x/crypto/ssh/terminal/util.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd\n\n// Package terminal provides support functions for dealing with terminals, as\n// commonly found on UNIX systems.\n//\n// Putting a terminal into raw mode is the most common requirement:\n//\n// \toldState, err := terminal.MakeRaw(0)\n// \tif err != nil {\n// \t        panic(err)\n// \t}\n// \tdefer terminal.Restore(0, oldState)\npackage terminal\n\nimport (\n\t\"io\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\n// State contains the state of a terminal.\ntype State struct {\n\ttermios syscall.Termios\n}\n\n// IsTerminal returns true if the given file descriptor is a terminal.\nfunc IsTerminal(fd int) bool {\n\tvar termios syscall.Termios\n\t_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)\n\treturn err == 0\n}\n\n// MakeRaw put the terminal connected to the given file descriptor into raw\n// mode and returns the previous state of the terminal so that it can be\n// restored.\nfunc MakeRaw(fd int) (*State, error) {\n\tvar oldState State\n\tif _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {\n\t\treturn nil, err\n\t}\n\n\tnewState := oldState.termios\n\tnewState.Iflag &^= syscall.ISTRIP | syscall.INLCR | syscall.ICRNL | syscall.IGNCR | syscall.IXON | syscall.IXOFF\n\tnewState.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.ISIG\n\tif _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {\n\t\treturn nil, err\n\t}\n\n\treturn &oldState, nil\n}\n\n// GetState returns the current state of a terminal which may be useful to\n// restore the terminal after a signal.\nfunc GetState(fd 
int) (*State, error) {\n\tvar oldState State\n\tif _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {\n\t\treturn nil, err\n\t}\n\n\treturn &oldState, nil\n}\n\n// Restore restores the terminal connected to the given file descriptor to a\n// previous state.\nfunc Restore(fd int, state *State) error {\n\t_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0)\n\treturn err\n}\n\n// GetSize returns the dimensions of the given terminal.\nfunc GetSize(fd int) (width, height int, err error) {\n\tvar dimensions [4]uint16\n\n\tif _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {\n\t\treturn -1, -1, err\n\t}\n\treturn int(dimensions[1]), int(dimensions[0]), nil\n}\n\n// ReadPassword reads a line of input from a terminal without local echo.  This\n// is commonly used for inputting passwords and other sensitive data. 
The slice\n// returned does not include the \\n.\nfunc ReadPassword(fd int) ([]byte, error) {\n\tvar oldState syscall.Termios\n\tif _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 {\n\t\treturn nil, err\n\t}\n\n\tnewState := oldState\n\tnewState.Lflag &^= syscall.ECHO\n\tnewState.Lflag |= syscall.ICANON | syscall.ISIG\n\tnewState.Iflag |= syscall.ICRNL\n\tif _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {\n\t\treturn nil, err\n\t}\n\n\tdefer func() {\n\t\tsyscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0)\n\t}()\n\n\tvar buf [16]byte\n\tvar ret []byte\n\tfor {\n\t\tn, err := syscall.Read(fd, buf[:])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif n == 0 {\n\t\t\tif len(ret) == 0 {\n\t\t\t\treturn nil, io.EOF\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif buf[n-1] == '\\n' {\n\t\t\tn--\n\t\t}\n\t\tret = append(ret, buf[:n]...)\n\t\tif n < len(buf) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn ret, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\npackage terminal\n\nimport \"syscall\"\n\nconst ioctlReadTermios = syscall.TIOCGETA\nconst ioctlWriteTermios = syscall.TIOCSETA\n"
  },
  {
    "path": "vendor/golang.org/x/crypto/ssh/terminal/util_linux.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage terminal\n\n// These constants are declared here, rather than importing\n// them from the syscall package as some syscall packages, even\n// on linux, for example gccgo, do not declare them.\nconst ioctlReadTermios = 0x5401  // syscall.TCGETS\nconst ioctlWriteTermios = 0x5402 // syscall.TCSETS\n"
  },
  {
    "path": "vendor/golang.org/x/crypto/ssh/terminal/util_windows.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build windows\n\n// Package terminal provides support functions for dealing with terminals, as\n// commonly found on UNIX systems.\n//\n// Putting a terminal into raw mode is the most common requirement:\n//\n// \toldState, err := terminal.MakeRaw(0)\n// \tif err != nil {\n// \t        panic(err)\n// \t}\n// \tdefer terminal.Restore(0, oldState)\npackage terminal\n\nimport (\n\t\"io\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nconst (\n\tenableLineInput       = 2\n\tenableEchoInput       = 4\n\tenableProcessedInput  = 1\n\tenableWindowInput     = 8\n\tenableMouseInput      = 16\n\tenableInsertMode      = 32\n\tenableQuickEditMode   = 64\n\tenableExtendedFlags   = 128\n\tenableAutoPosition    = 256\n\tenableProcessedOutput = 1\n\tenableWrapAtEolOutput = 2\n)\n\nvar kernel32 = syscall.NewLazyDLL(\"kernel32.dll\")\n\nvar (\n\tprocGetConsoleMode             = kernel32.NewProc(\"GetConsoleMode\")\n\tprocSetConsoleMode             = kernel32.NewProc(\"SetConsoleMode\")\n\tprocGetConsoleScreenBufferInfo = kernel32.NewProc(\"GetConsoleScreenBufferInfo\")\n)\n\ntype (\n\tshort int16\n\tword  uint16\n\n\tcoord struct {\n\t\tx short\n\t\ty short\n\t}\n\tsmallRect struct {\n\t\tleft   short\n\t\ttop    short\n\t\tright  short\n\t\tbottom short\n\t}\n\tconsoleScreenBufferInfo struct {\n\t\tsize              coord\n\t\tcursorPosition    coord\n\t\tattributes        word\n\t\twindow            smallRect\n\t\tmaximumWindowSize coord\n\t}\n)\n\ntype State struct {\n\tmode uint32\n}\n\n// IsTerminal returns true if the given file descriptor is a terminal.\nfunc IsTerminal(fd int) bool {\n\tvar st uint32\n\tr, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)\n\treturn r != 0 && e == 0\n}\n\n// MakeRaw put the terminal connected to the given file descriptor into 
raw\n// mode and returns the previous state of the terminal so that it can be\n// restored.\nfunc MakeRaw(fd int) (*State, error) {\n\tvar st uint32\n\t_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)\n\tif e != 0 {\n\t\treturn nil, error(e)\n\t}\n\tst &^= (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput)\n\t_, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)\n\tif e != 0 {\n\t\treturn nil, error(e)\n\t}\n\treturn &State{st}, nil\n}\n\n// GetState returns the current state of a terminal which may be useful to\n// restore the terminal after a signal.\nfunc GetState(fd int) (*State, error) {\n\tvar st uint32\n\t_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)\n\tif e != 0 {\n\t\treturn nil, error(e)\n\t}\n\treturn &State{st}, nil\n}\n\n// Restore restores the terminal connected to the given file descriptor to a\n// previous state.\nfunc Restore(fd int, state *State) error {\n\t_, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0)\n\treturn err\n}\n\n// GetSize returns the dimensions of the given terminal.\nfunc GetSize(fd int) (width, height int, err error) {\n\tvar info consoleScreenBufferInfo\n\t_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0)\n\tif e != 0 {\n\t\treturn 0, 0, error(e)\n\t}\n\treturn int(info.size.x), int(info.size.y), nil\n}\n\n// ReadPassword reads a line of input from a terminal without local echo.  This\n// is commonly used for inputting passwords and other sensitive data. 
The slice\n// returned does not include the \\n.\nfunc ReadPassword(fd int) ([]byte, error) {\n\tvar st uint32\n\t_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)\n\tif e != 0 {\n\t\treturn nil, error(e)\n\t}\n\told := st\n\n\tst &^= (enableEchoInput)\n\tst |= (enableProcessedInput | enableLineInput | enableProcessedOutput)\n\t_, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)\n\tif e != 0 {\n\t\treturn nil, error(e)\n\t}\n\n\tdefer func() {\n\t\tsyscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)\n\t}()\n\n\tvar buf [16]byte\n\tvar ret []byte\n\tfor {\n\t\tn, err := syscall.Read(syscall.Handle(fd), buf[:])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif n == 0 {\n\t\t\tif len(ret) == 0 {\n\t\t\t\treturn nil, io.EOF\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif buf[n-1] == '\\n' {\n\t\t\tn--\n\t\t}\n\t\tif n > 0 && buf[n-1] == '\\r' {\n\t\t\tn--\n\t\t}\n\t\tret = append(ret, buf[:n]...)\n\t\tif n < len(buf) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn ret, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/LICENSE",
    "content": "Copyright (c) 2009 The Go Authors. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n   * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n   * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n   * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "vendor/golang.org/x/net/PATENTS",
    "content": "Additional IP Rights Grant (Patents)\n\n\"This implementation\" means the copyrightable works distributed by\nGoogle as part of the Go project.\n\nGoogle hereby grants to You a perpetual, worldwide, non-exclusive,\nno-charge, royalty-free, irrevocable (except as stated in this section)\npatent license to make, have made, use, offer to sell, sell, import,\ntransfer and otherwise run, modify and propagate the contents of this\nimplementation of Go, where such license applies only to those patent\nclaims, both currently owned or controlled by Google and acquired in\nthe future, licensable by Google that are necessarily infringed by this\nimplementation of Go.  This grant does not include claims that would be\ninfringed only as a consequence of further modification of this\nimplementation.  If you or your agent or exclusive licensee institute or\norder or agree to the institution of patent litigation against any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging\nthat this implementation of Go or any code incorporated within this\nimplementation of Go constitutes direct or contributory patent\ninfringement, or inducement of patent infringement, then any patent\nrights granted to you under this License for this implementation of Go\nshall terminate as of the date such litigation is filed.\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/context.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package context defines the Context type, which carries deadlines,\n// cancelation signals, and other request-scoped values across API boundaries\n// and between processes.\n//\n// Incoming requests to a server should create a Context, and outgoing calls to\n// servers should accept a Context.  The chain of function calls between must\n// propagate the Context, optionally replacing it with a modified copy created\n// using WithDeadline, WithTimeout, WithCancel, or WithValue.\n//\n// Programs that use Contexts should follow these rules to keep interfaces\n// consistent across packages and enable static analysis tools to check context\n// propagation:\n//\n// Do not store Contexts inside a struct type; instead, pass a Context\n// explicitly to each function that needs it.  The Context should be the first\n// parameter, typically named ctx:\n//\n// \tfunc DoSomething(ctx context.Context, arg Arg) error {\n// \t\t// ... use ctx ...\n// \t}\n//\n// Do not pass a nil Context, even if a function permits it.  
Pass context.TODO\n// if you are unsure about which Context to use.\n//\n// Use context Values only for request-scoped data that transits processes and\n// APIs, not for passing optional parameters to functions.\n//\n// The same Context may be passed to functions running in different goroutines;\n// Contexts are safe for simultaneous use by multiple goroutines.\n//\n// See http://blog.golang.org/context for example code for a server that uses\n// Contexts.\npackage context\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n)\n\n// A Context carries a deadline, a cancelation signal, and other values across\n// API boundaries.\n//\n// Context's methods may be called by multiple goroutines simultaneously.\ntype Context interface {\n\t// Deadline returns the time when work done on behalf of this context\n\t// should be canceled.  Deadline returns ok==false when no deadline is\n\t// set.  Successive calls to Deadline return the same results.\n\tDeadline() (deadline time.Time, ok bool)\n\n\t// Done returns a channel that's closed when work done on behalf of this\n\t// context should be canceled.  Done may return nil if this context can\n\t// never be canceled.  
Successive calls to Done return the same value.\n\t//\n\t// WithCancel arranges for Done to be closed when cancel is called;\n\t// WithDeadline arranges for Done to be closed when the deadline\n\t// expires; WithTimeout arranges for Done to be closed when the timeout\n\t// elapses.\n\t//\n\t// Done is provided for use in select statements:\n\t//\n\t//  // Stream generates values with DoSomething and sends them to out\n\t//  // until DoSomething returns an error or ctx.Done is closed.\n\t//  func Stream(ctx context.Context, out <-chan Value) error {\n\t//  \tfor {\n\t//  \t\tv, err := DoSomething(ctx)\n\t//  \t\tif err != nil {\n\t//  \t\t\treturn err\n\t//  \t\t}\n\t//  \t\tselect {\n\t//  \t\tcase <-ctx.Done():\n\t//  \t\t\treturn ctx.Err()\n\t//  \t\tcase out <- v:\n\t//  \t\t}\n\t//  \t}\n\t//  }\n\t//\n\t// See http://blog.golang.org/pipelines for more examples of how to use\n\t// a Done channel for cancelation.\n\tDone() <-chan struct{}\n\n\t// Err returns a non-nil error value after Done is closed.  Err returns\n\t// Canceled if the context was canceled or DeadlineExceeded if the\n\t// context's deadline passed.  No other values for Err are defined.\n\t// After Done is closed, successive calls to Err return the same value.\n\tErr() error\n\n\t// Value returns the value associated with this context for key, or nil\n\t// if no value is associated with key.  Successive calls to Value with\n\t// the same key returns the same result.\n\t//\n\t// Use context values only for request-scoped data that transits\n\t// processes and API boundaries, not for passing optional parameters to\n\t// functions.\n\t//\n\t// A key identifies a specific value in a Context.  Functions that wish\n\t// to store values in Context typically allocate a key in a global\n\t// variable then use that key as the argument to context.WithValue and\n\t// Context.Value.  
A key can be any type that supports equality;\n\t// packages should define keys as an unexported type to avoid\n\t// collisions.\n\t//\n\t// Packages that define a Context key should provide type-safe accessors\n\t// for the values stores using that key:\n\t//\n\t// \t// Package user defines a User type that's stored in Contexts.\n\t// \tpackage user\n\t//\n\t// \timport \"golang.org/x/net/context\"\n\t//\n\t// \t// User is the type of value stored in the Contexts.\n\t// \ttype User struct {...}\n\t//\n\t// \t// key is an unexported type for keys defined in this package.\n\t// \t// This prevents collisions with keys defined in other packages.\n\t// \ttype key int\n\t//\n\t// \t// userKey is the key for user.User values in Contexts.  It is\n\t// \t// unexported; clients use user.NewContext and user.FromContext\n\t// \t// instead of using this key directly.\n\t// \tvar userKey key = 0\n\t//\n\t// \t// NewContext returns a new Context that carries value u.\n\t// \tfunc NewContext(ctx context.Context, u *User) context.Context {\n\t// \t\treturn context.WithValue(ctx, userKey, u)\n\t// \t}\n\t//\n\t// \t// FromContext returns the User value stored in ctx, if any.\n\t// \tfunc FromContext(ctx context.Context) (*User, bool) {\n\t// \t\tu, ok := ctx.Value(userKey).(*User)\n\t// \t\treturn u, ok\n\t// \t}\n\tValue(key interface{}) interface{}\n}\n\n// Canceled is the error returned by Context.Err when the context is canceled.\nvar Canceled = errors.New(\"context canceled\")\n\n// DeadlineExceeded is the error returned by Context.Err when the context's\n// deadline passes.\nvar DeadlineExceeded = errors.New(\"context deadline exceeded\")\n\n// An emptyCtx is never canceled, has no values, and has no deadline.  
It is not\n// struct{}, since vars of this type must have distinct addresses.\ntype emptyCtx int\n\nfunc (*emptyCtx) Deadline() (deadline time.Time, ok bool) {\n\treturn\n}\n\nfunc (*emptyCtx) Done() <-chan struct{} {\n\treturn nil\n}\n\nfunc (*emptyCtx) Err() error {\n\treturn nil\n}\n\nfunc (*emptyCtx) Value(key interface{}) interface{} {\n\treturn nil\n}\n\nfunc (e *emptyCtx) String() string {\n\tswitch e {\n\tcase background:\n\t\treturn \"context.Background\"\n\tcase todo:\n\t\treturn \"context.TODO\"\n\t}\n\treturn \"unknown empty Context\"\n}\n\nvar (\n\tbackground = new(emptyCtx)\n\ttodo       = new(emptyCtx)\n)\n\n// Background returns a non-nil, empty Context. It is never canceled, has no\n// values, and has no deadline.  It is typically used by the main function,\n// initialization, and tests, and as the top-level Context for incoming\n// requests.\nfunc Background() Context {\n\treturn background\n}\n\n// TODO returns a non-nil, empty Context.  Code should use context.TODO when\n// it's unclear which Context to use or it's is not yet available (because the\n// surrounding function has not yet been extended to accept a Context\n// parameter).  TODO is recognized by static analysis tools that determine\n// whether Contexts are propagated correctly in a program.\nfunc TODO() Context {\n\treturn todo\n}\n\n// A CancelFunc tells an operation to abandon its work.\n// A CancelFunc does not wait for the work to stop.\n// After the first call, subsequent calls to a CancelFunc do nothing.\ntype CancelFunc func()\n\n// WithCancel returns a copy of parent with a new Done channel. 
The returned\n// context's Done channel is closed when the returned cancel function is called\n// or when the parent context's Done channel is closed, whichever happens first.\n//\n// Canceling this context releases resources associated with it, so code should\n// call cancel as soon as the operations running in this Context complete.\nfunc WithCancel(parent Context) (ctx Context, cancel CancelFunc) {\n\tc := newCancelCtx(parent)\n\tpropagateCancel(parent, &c)\n\treturn &c, func() { c.cancel(true, Canceled) }\n}\n\n// newCancelCtx returns an initialized cancelCtx.\nfunc newCancelCtx(parent Context) cancelCtx {\n\treturn cancelCtx{\n\t\tContext: parent,\n\t\tdone:    make(chan struct{}),\n\t}\n}\n\n// propagateCancel arranges for child to be canceled when parent is.\nfunc propagateCancel(parent Context, child canceler) {\n\tif parent.Done() == nil {\n\t\treturn // parent is never canceled\n\t}\n\tif p, ok := parentCancelCtx(parent); ok {\n\t\tp.mu.Lock()\n\t\tif p.err != nil {\n\t\t\t// parent has already been canceled\n\t\t\tchild.cancel(false, p.err)\n\t\t} else {\n\t\t\tif p.children == nil {\n\t\t\t\tp.children = make(map[canceler]bool)\n\t\t\t}\n\t\t\tp.children[child] = true\n\t\t}\n\t\tp.mu.Unlock()\n\t} else {\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-parent.Done():\n\t\t\t\tchild.cancel(false, parent.Err())\n\t\t\tcase <-child.Done():\n\t\t\t}\n\t\t}()\n\t}\n}\n\n// parentCancelCtx follows a chain of parent references until it finds a\n// *cancelCtx.  
This function understands how each of the concrete types in this\n// package represents its parent.\nfunc parentCancelCtx(parent Context) (*cancelCtx, bool) {\n\tfor {\n\t\tswitch c := parent.(type) {\n\t\tcase *cancelCtx:\n\t\t\treturn c, true\n\t\tcase *timerCtx:\n\t\t\treturn &c.cancelCtx, true\n\t\tcase *valueCtx:\n\t\t\tparent = c.Context\n\t\tdefault:\n\t\t\treturn nil, false\n\t\t}\n\t}\n}\n\n// removeChild removes a context from its parent.\nfunc removeChild(parent Context, child canceler) {\n\tp, ok := parentCancelCtx(parent)\n\tif !ok {\n\t\treturn\n\t}\n\tp.mu.Lock()\n\tif p.children != nil {\n\t\tdelete(p.children, child)\n\t}\n\tp.mu.Unlock()\n}\n\n// A canceler is a context type that can be canceled directly.  The\n// implementations are *cancelCtx and *timerCtx.\ntype canceler interface {\n\tcancel(removeFromParent bool, err error)\n\tDone() <-chan struct{}\n}\n\n// A cancelCtx can be canceled.  When canceled, it also cancels any children\n// that implement canceler.\ntype cancelCtx struct {\n\tContext\n\n\tdone chan struct{} // closed by the first cancel call.\n\n\tmu       sync.Mutex\n\tchildren map[canceler]bool // set to nil by the first cancel call\n\terr      error             // set to non-nil by the first cancel call\n}\n\nfunc (c *cancelCtx) Done() <-chan struct{} {\n\treturn c.done\n}\n\nfunc (c *cancelCtx) Err() error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\treturn c.err\n}\n\nfunc (c *cancelCtx) String() string {\n\treturn fmt.Sprintf(\"%v.WithCancel\", c.Context)\n}\n\n// cancel closes c.done, cancels each of c's children, and, if\n// removeFromParent is true, removes c from its parent's children.\nfunc (c *cancelCtx) cancel(removeFromParent bool, err error) {\n\tif err == nil {\n\t\tpanic(\"context: internal error: missing cancel error\")\n\t}\n\tc.mu.Lock()\n\tif c.err != nil {\n\t\tc.mu.Unlock()\n\t\treturn // already canceled\n\t}\n\tc.err = err\n\tclose(c.done)\n\tfor child := range c.children {\n\t\t// NOTE: acquiring the child's 
lock while holding parent's lock.\n\t\tchild.cancel(false, err)\n\t}\n\tc.children = nil\n\tc.mu.Unlock()\n\n\tif removeFromParent {\n\t\tremoveChild(c.Context, c)\n\t}\n}\n\n// WithDeadline returns a copy of the parent context with the deadline adjusted\n// to be no later than d.  If the parent's deadline is already earlier than d,\n// WithDeadline(parent, d) is semantically equivalent to parent.  The returned\n// context's Done channel is closed when the deadline expires, when the returned\n// cancel function is called, or when the parent context's Done channel is\n// closed, whichever happens first.\n//\n// Canceling this context releases resources associated with it, so code should\n// call cancel as soon as the operations running in this Context complete.\nfunc WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {\n\tif cur, ok := parent.Deadline(); ok && cur.Before(deadline) {\n\t\t// The current deadline is already sooner than the new one.\n\t\treturn WithCancel(parent)\n\t}\n\tc := &timerCtx{\n\t\tcancelCtx: newCancelCtx(parent),\n\t\tdeadline:  deadline,\n\t}\n\tpropagateCancel(parent, c)\n\td := deadline.Sub(time.Now())\n\tif d <= 0 {\n\t\tc.cancel(true, DeadlineExceeded) // deadline has already passed\n\t\treturn c, func() { c.cancel(true, Canceled) }\n\t}\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tif c.err == nil {\n\t\tc.timer = time.AfterFunc(d, func() {\n\t\t\tc.cancel(true, DeadlineExceeded)\n\t\t})\n\t}\n\treturn c, func() { c.cancel(true, Canceled) }\n}\n\n// A timerCtx carries a timer and a deadline.  It embeds a cancelCtx to\n// implement Done and Err.  
It implements cancel by stopping its timer then\n// delegating to cancelCtx.cancel.\ntype timerCtx struct {\n\tcancelCtx\n\ttimer *time.Timer // Under cancelCtx.mu.\n\n\tdeadline time.Time\n}\n\nfunc (c *timerCtx) Deadline() (deadline time.Time, ok bool) {\n\treturn c.deadline, true\n}\n\nfunc (c *timerCtx) String() string {\n\treturn fmt.Sprintf(\"%v.WithDeadline(%s [%s])\", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))\n}\n\nfunc (c *timerCtx) cancel(removeFromParent bool, err error) {\n\tc.cancelCtx.cancel(false, err)\n\tif removeFromParent {\n\t\t// Remove this timerCtx from its parent cancelCtx's children.\n\t\tremoveChild(c.cancelCtx.Context, c)\n\t}\n\tc.mu.Lock()\n\tif c.timer != nil {\n\t\tc.timer.Stop()\n\t\tc.timer = nil\n\t}\n\tc.mu.Unlock()\n}\n\n// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).\n//\n// Canceling this context releases resources associated with it, so code should\n// call cancel as soon as the operations running in this Context complete:\n//\n// \tfunc slowOperationWithTimeout(ctx context.Context) (Result, error) {\n// \t\tctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)\n// \t\tdefer cancel()  // releases resources if slowOperation completes before timeout elapses\n// \t\treturn slowOperation(ctx)\n// \t}\nfunc WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {\n\treturn WithDeadline(parent, time.Now().Add(timeout))\n}\n\n// WithValue returns a copy of parent in which the value associated with key is\n// val.\n//\n// Use context Values only for request-scoped data that transits processes and\n// APIs, not for passing optional parameters to functions.\nfunc WithValue(parent Context, key interface{}, val interface{}) Context {\n\treturn &valueCtx{parent, key, val}\n}\n\n// A valueCtx carries a key-value pair.  
It implements Value for that key and\n// delegates all other calls to the embedded Context.\ntype valueCtx struct {\n\tContext\n\tkey, val interface{}\n}\n\nfunc (c *valueCtx) String() string {\n\treturn fmt.Sprintf(\"%v.WithValue(%#v, %#v)\", c.Context, c.key, c.val)\n}\n\nfunc (c *valueCtx) Value(key interface{}) interface{} {\n\tif c.key == key {\n\t\treturn c.val\n\t}\n\treturn c.Context.Value(key)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/.travis.yml",
    "content": "language: go\n\ngo:\n  - 1.3\n  - 1.4\n\ninstall:\n  - export GOPATH=\"$HOME/gopath\"\n  - mkdir -p \"$GOPATH/src/golang.org/x\"\n  - mv \"$TRAVIS_BUILD_DIR\" \"$GOPATH/src/golang.org/x/oauth2\"\n  - go get -v -t -d golang.org/x/oauth2/...\n\nscript:\n  - go test -v golang.org/x/oauth2/...\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/AUTHORS",
    "content": "# This source code refers to The Go Authors for copyright purposes.\n# The master list of authors is in the main Go distribution,\n# visible at http://tip.golang.org/AUTHORS.\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/CONTRIBUTING.md",
    "content": "# Contributing to Go\n\nGo is an open source project.\n\nIt is the work of hundreds of contributors. We appreciate your help!\n\n\n## Filing issues\n\nWhen [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:\n\n1. What version of Go are you using (`go version`)?\n2. What operating system and processor architecture are you using?\n3. What did you do?\n4. What did you expect to see?\n5. What did you see instead?\n\nGeneral questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.\nThe gophers there will answer or ask you to file an issue if you've tripped over a bug.\n\n## Contributing code\n\nPlease read the [Contribution Guidelines](https://golang.org/doc/contribute.html)\nbefore sending patches.\n\n**We do not accept GitHub pull requests**\n(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).\n\nUnless otherwise noted, the Go source files are distributed under\nthe BSD-style license found in the LICENSE file.\n\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/CONTRIBUTORS",
    "content": "# This source code was written by the Go contributors.\n# The master list of contributors is in the main Go distribution,\n# visible at http://tip.golang.org/CONTRIBUTORS.\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/LICENSE",
    "content": "Copyright (c) 2009 The oauth2 Authors. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n   * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n   * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n   * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/README.md",
    "content": "# OAuth2 for Go\n\n[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)\n\noauth2 package contains a client implementation for OAuth 2.0 spec.\n\n## Installation\n\n~~~~\ngo get golang.org/x/oauth2\n~~~~\n\nSee godoc for further documentation and examples.\n\n* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)\n* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)\n\n\n## App Engine\n\nIn change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor\nof the [`context.Context`](https://golang.org/x/net/context#Context) type from\nthe `golang.org/x/net/context` package\n\nThis means its no longer possible to use the \"Classic App Engine\"\n`appengine.Context` type with the `oauth2` package. (You're using\nClassic App Engine if you import the package `\"appengine\"`.)\n\nTo work around this, you may use the new `\"google.golang.org/appengine\"`\npackage. 
This package has almost the same API as the `\"appengine\"` package,\nbut it can be fetched with `go get` and used on \"Managed VMs\" and well as\nClassic App Engine.\n\nSee the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)\nfor information on updating your app.\n\nIf you don't want to update your entire app to use the new App Engine packages,\nyou may use both sets of packages in parallel, using only the new packages\nwith the `oauth2` package.\n\n\timport (\n\t\t\"golang.org/x/net/context\"\n\t\t\"golang.org/x/oauth2\"\n\t\t\"golang.org/x/oauth2/google\"\n\t\tnewappengine \"google.golang.org/appengine\"\n\t\tnewurlfetch \"google.golang.org/appengine/urlfetch\"\n\n\t\t\"appengine\"\n\t)\n\n\tfunc handler(w http.ResponseWriter, r *http.Request) {\n\t\tvar c appengine.Context = appengine.NewContext(r)\n\t\tc.Infof(\"Logging a message with the old package\")\n\n\t\tvar ctx context.Context = newappengine.NewContext(r)\n\t\tclient := &http.Client{\n\t\t\tTransport: &oauth2.Transport{\n\t\t\t\tSource: google.AppEngineTokenSource(ctx, \"scope\"),\n\t\t\t\tBase:   &newurlfetch.Transport{Context: ctx},\n\t\t\t},\n\t\t}\n\t\tclient.Get(\"...\")\n\t}\n\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/client_appengine.go",
    "content": "// Copyright 2014 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build appengine appenginevm\n\n// App Engine hooks.\n\npackage oauth2\n\nimport (\n\t\"net/http\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2/internal\"\n\t\"google.golang.org/appengine/urlfetch\"\n)\n\nfunc init() {\n\tinternal.RegisterContextClientFunc(contextClientAppEngine)\n}\n\nfunc contextClientAppEngine(ctx context.Context) (*http.Client, error) {\n\treturn urlfetch.Client(ctx), nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go",
    "content": "// Copyright 2014 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package clientcredentials implements the OAuth2.0 \"client credentials\" token flow,\n// also known as the \"two-legged OAuth 2.0\".\n//\n// This should be used when the client is acting on its own behalf or when the client\n// is the resource owner. It may also be used when requesting access to protected\n// resources based on an authorization previously arranged with the authorization\n// server.\n//\n// See http://tools.ietf.org/html/draft-ietf-oauth-v2-31#section-4.4\npackage clientcredentials\n\nimport (\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/internal\"\n)\n\n// tokenFromInternal maps an *internal.Token struct into\n// an *oauth2.Token struct.\nfunc tokenFromInternal(t *internal.Token) *oauth2.Token {\n\tif t == nil {\n\t\treturn nil\n\t}\n\ttk := &oauth2.Token{\n\t\tAccessToken:  t.AccessToken,\n\t\tTokenType:    t.TokenType,\n\t\tRefreshToken: t.RefreshToken,\n\t\tExpiry:       t.Expiry,\n\t}\n\treturn tk.WithExtra(t.Raw)\n}\n\n// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.\n// This token is then mapped from *internal.Token into an *oauth2.Token which is\n// returned along with an error.\nfunc retrieveToken(ctx context.Context, c *Config, v url.Values) (*oauth2.Token, error) {\n\ttk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.TokenURL, v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn tokenFromInternal(tk), nil\n}\n\n// Client Credentials Config describes a 2-legged OAuth2 flow, with both the\n// client application information and the server's endpoint URLs.\ntype Config struct {\n\t// ClientID is the application's ID.\n\tClientID string\n\n\t// ClientSecret is the application's secret.\n\tClientSecret string\n\n\t// 
TokenURL is the resource server's token endpoint\n\t// URL. This is a constant specific to each server.\n\tTokenURL string\n\n\t// Scope specifies optional requested permissions.\n\tScopes []string\n}\n\n// Token uses client credentials to retreive a token.\n// The HTTP client to use is derived from the context.\n// If nil, http.DefaultClient is used.\nfunc (c *Config) Token(ctx context.Context) (*oauth2.Token, error) {\n\treturn retrieveToken(ctx, c, url.Values{\n\t\t\"grant_type\": {\"client_credentials\"},\n\t\t\"scope\":      internal.CondVal(strings.Join(c.Scopes, \" \")),\n\t})\n}\n\n// Client returns an HTTP client using the provided token.\n// The token will auto-refresh as necessary. The underlying\n// HTTP transport will be obtained using the provided context.\n// The returned client and its Transport should not be modified.\nfunc (c *Config) Client(ctx context.Context) *http.Client {\n\treturn oauth2.NewClient(ctx, c.TokenSource(ctx))\n}\n\n// TokenSource returns a TokenSource that returns t until t expires,\n// automatically refreshing it as necessary using the provided context and the\n// client ID and client secret.\n//\n// Most users will use Config.Client instead.\nfunc (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {\n\tsource := &tokenSource{\n\t\tctx:  ctx,\n\t\tconf: c,\n\t}\n\treturn oauth2.ReuseTokenSource(nil, source)\n}\n\ntype tokenSource struct {\n\tctx  context.Context\n\tconf *Config\n}\n\n// Token refreshes the token by using a new client credentials request.\n// tokens received this way do not include a refresh token\nfunc (c *tokenSource) Token() (*oauth2.Token, error) {\n\treturn retrieveToken(c.ctx, c.conf, url.Values{\n\t\t\"grant_type\": {\"client_credentials\"},\n\t\t\"scope\":      internal.CondVal(strings.Join(c.conf.Scopes, \" \")),\n\t})\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/facebook/facebook.go",
    "content": "// Copyright 2015 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package facebook provides constants for using OAuth2 to access Facebook.\npackage facebook\n\nimport (\n\t\"golang.org/x/oauth2\"\n)\n\n// Endpoint is Facebook's OAuth 2.0 endpoint.\nvar Endpoint = oauth2.Endpoint{\n\tAuthURL:  \"https://www.facebook.com/dialog/oauth\",\n\tTokenURL: \"https://graph.facebook.com/oauth/access_token\",\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/github/github.go",
    "content": "// Copyright 2014 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package github provides constants for using OAuth2 to access Github.\npackage github\n\nimport (\n\t\"golang.org/x/oauth2\"\n)\n\n// Endpoint is Github's OAuth 2.0 endpoint.\nvar Endpoint = oauth2.Endpoint{\n\tAuthURL:  \"https://github.com/login/oauth/authorize\",\n\tTokenURL: \"https://github.com/login/oauth/access_token\",\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/google/appengine.go",
    "content": "// Copyright 2014 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage google\n\nimport (\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2\"\n)\n\n// Set at init time by appengine_hook.go. If nil, we're not on App Engine.\nvar appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)\n\n// AppEngineTokenSource returns a token source that fetches tokens\n// issued to the current App Engine application's service account.\n// If you are implementing a 3-legged OAuth 2.0 flow on App Engine\n// that involves user accounts, see oauth2.Config instead.\n//\n// The provided context must have come from appengine.NewContext.\nfunc AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {\n\tif appengineTokenFunc == nil {\n\t\tpanic(\"google: AppEngineTokenSource can only be used on App Engine.\")\n\t}\n\tscopes := append([]string{}, scope...)\n\tsort.Strings(scopes)\n\treturn &appEngineTokenSource{\n\t\tctx:    ctx,\n\t\tscopes: scopes,\n\t\tkey:    strings.Join(scopes, \" \"),\n\t}\n}\n\n// aeTokens helps the fetched tokens to be reused until their expiration.\nvar (\n\taeTokensMu sync.Mutex\n\taeTokens   = make(map[string]*tokenLock) // key is space-separated scopes\n)\n\ntype tokenLock struct {\n\tmu sync.Mutex // guards t; held while fetching or updating t\n\tt  *oauth2.Token\n}\n\ntype appEngineTokenSource struct {\n\tctx    context.Context\n\tscopes []string\n\tkey    string // to aeTokens map; space-separated scopes\n}\n\nfunc (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {\n\tif appengineTokenFunc == nil {\n\t\tpanic(\"google: AppEngineTokenSource can only be used on App Engine.\")\n\t}\n\n\taeTokensMu.Lock()\n\ttok, ok := aeTokens[ts.key]\n\tif !ok {\n\t\ttok = &tokenLock{}\n\t\taeTokens[ts.key] = 
tok\n\t}\n\taeTokensMu.Unlock()\n\n\ttok.mu.Lock()\n\tdefer tok.mu.Unlock()\n\tif tok.t.Valid() {\n\t\treturn tok.t, nil\n\t}\n\taccess, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttok.t = &oauth2.Token{\n\t\tAccessToken: access,\n\t\tExpiry:      exp,\n\t}\n\treturn tok.t, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/google/appengine_hook.go",
    "content": "// Copyright 2015 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build appengine appenginevm\n\npackage google\n\nimport \"google.golang.org/appengine\"\n\nfunc init() {\n\tappengineTokenFunc = appengine.AccessToken\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/google/default.go",
    "content": "// Copyright 2015 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage google\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/jwt\"\n\t\"google.golang.org/cloud/compute/metadata\"\n)\n\n// DefaultClient returns an HTTP Client that uses the\n// DefaultTokenSource to obtain authentication credentials.\n//\n// This client should be used when developing services\n// that run on Google App Engine or Google Compute Engine\n// and use \"Application Default Credentials.\"\n//\n// For more details, see:\n// https://developers.google.com/accounts/docs/application-default-credentials\n//\nfunc DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {\n\tts, err := DefaultTokenSource(ctx, scope...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn oauth2.NewClient(ctx, ts), nil\n}\n\n// DefaultTokenSource is a token source that uses\n// \"Application Default Credentials\".\n//\n// It looks for credentials in the following places,\n// preferring the first location found:\n//\n//   1. A JSON file whose path is specified by the\n//      GOOGLE_APPLICATION_CREDENTIALS environment variable.\n//   2. A JSON file in a location known to the gcloud command-line tool.\n//      On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.\n//      On other systems, $HOME/.config/gcloud/application_default_credentials.json.\n//   3. On Google App Engine it uses the appengine.AccessToken function.\n//   4. 
On Google Compute Engine, it fetches credentials from the metadata server.\n//      (In this final case any provided scopes are ignored.)\n//\n// For more details, see:\n// https://developers.google.com/accounts/docs/application-default-credentials\n//\nfunc DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {\n\t// First, try the environment variable.\n\tconst envVar = \"GOOGLE_APPLICATION_CREDENTIALS\"\n\tif filename := os.Getenv(envVar); filename != \"\" {\n\t\tts, err := tokenSourceFromFile(ctx, filename, scope)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"google: error getting credentials using %v environment variable: %v\", envVar, err)\n\t\t}\n\t\treturn ts, nil\n\t}\n\n\t// Second, try a well-known file.\n\tfilename := wellKnownFile()\n\t_, err := os.Stat(filename)\n\tif err == nil {\n\t\tts, err2 := tokenSourceFromFile(ctx, filename, scope)\n\t\tif err2 == nil {\n\t\t\treturn ts, nil\n\t\t}\n\t\terr = err2\n\t} else if os.IsNotExist(err) {\n\t\terr = nil // ignore this error\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"google: error getting credentials using well-known file (%v): %v\", filename, err)\n\t}\n\n\t// Third, if we're on Google App Engine use those credentials.\n\tif appengineTokenFunc != nil {\n\t\treturn AppEngineTokenSource(ctx, scope...), nil\n\t}\n\n\t// Fourth, if we're on Google Compute Engine use the metadata server.\n\tif metadata.OnGCE() {\n\t\treturn ComputeTokenSource(\"\"), nil\n\t}\n\n\t// None are found; return helpful error.\n\tconst url = \"https://developers.google.com/accounts/docs/application-default-credentials\"\n\treturn nil, fmt.Errorf(\"google: could not find default credentials. 
See %v for more information.\", url)\n}\n\nfunc wellKnownFile() string {\n\tconst f = \"application_default_credentials.json\"\n\tif runtime.GOOS == \"windows\" {\n\t\treturn filepath.Join(os.Getenv(\"APPDATA\"), \"gcloud\", f)\n\t}\n\treturn filepath.Join(guessUnixHomeDir(), \".config\", \"gcloud\", f)\n}\n\nfunc tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {\n\tb, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar d struct {\n\t\t// Common fields\n\t\tType     string\n\t\tClientID string `json:\"client_id\"`\n\n\t\t// User Credential fields\n\t\tClientSecret string `json:\"client_secret\"`\n\t\tRefreshToken string `json:\"refresh_token\"`\n\n\t\t// Service Account fields\n\t\tClientEmail  string `json:\"client_email\"`\n\t\tPrivateKeyID string `json:\"private_key_id\"`\n\t\tPrivateKey   string `json:\"private_key\"`\n\t}\n\tif err := json.Unmarshal(b, &d); err != nil {\n\t\treturn nil, err\n\t}\n\tswitch d.Type {\n\tcase \"authorized_user\":\n\t\tcfg := &oauth2.Config{\n\t\t\tClientID:     d.ClientID,\n\t\t\tClientSecret: d.ClientSecret,\n\t\t\tScopes:       append([]string{}, scopes...), // copy\n\t\t\tEndpoint:     Endpoint,\n\t\t}\n\t\ttok := &oauth2.Token{RefreshToken: d.RefreshToken}\n\t\treturn cfg.TokenSource(ctx, tok), nil\n\tcase \"service_account\":\n\t\tcfg := &jwt.Config{\n\t\t\tEmail:      d.ClientEmail,\n\t\t\tPrivateKey: []byte(d.PrivateKey),\n\t\t\tScopes:     append([]string{}, scopes...), // copy\n\t\t\tTokenURL:   JWTTokenURL,\n\t\t}\n\t\treturn cfg.TokenSource(ctx), nil\n\tcase \"\":\n\t\treturn nil, errors.New(\"missing 'type' field in credentials\")\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown credential type: %q\", d.Type)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/google/google.go",
    "content": "// Copyright 2014 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package google provides support for making OAuth2 authorized and\n// authenticated HTTP requests to Google APIs.\n// It supports the Web server flow, client-side credentials, service accounts,\n// Google Compute Engine service accounts, and Google App Engine service\n// accounts.\n//\n// For more information, please read\n// https://developers.google.com/accounts/docs/OAuth2\n// and\n// https://developers.google.com/accounts/docs/application-default-credentials.\npackage google\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/jwt\"\n\t\"google.golang.org/cloud/compute/metadata\"\n)\n\n// Endpoint is Google's OAuth 2.0 endpoint.\nvar Endpoint = oauth2.Endpoint{\n\tAuthURL:  \"https://accounts.google.com/o/oauth2/auth\",\n\tTokenURL: \"https://accounts.google.com/o/oauth2/token\",\n}\n\n// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.\nconst JWTTokenURL = \"https://accounts.google.com/o/oauth2/token\"\n\n// ConfigFromJSON uses a Google Developers Console client_credentials.json\n// file to construct a config.\n// client_credentials.json can be downloadable from https://console.developers.google.com,\n// under \"APIs & Auth\" > \"Credentials\". 
Download the Web application credentials in the\n// JSON format and provide the contents of the file as jsonKey.\nfunc ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {\n\ttype cred struct {\n\t\tClientID     string   `json:\"client_id\"`\n\t\tClientSecret string   `json:\"client_secret\"`\n\t\tRedirectURIs []string `json:\"redirect_uris\"`\n\t\tAuthURI      string   `json:\"auth_uri\"`\n\t\tTokenURI     string   `json:\"token_uri\"`\n\t}\n\tvar j struct {\n\t\tWeb       *cred `json:\"web\"`\n\t\tInstalled *cred `json:\"installed\"`\n\t}\n\tif err := json.Unmarshal(jsonKey, &j); err != nil {\n\t\treturn nil, err\n\t}\n\tvar c *cred\n\tswitch {\n\tcase j.Web != nil:\n\t\tc = j.Web\n\tcase j.Installed != nil:\n\t\tc = j.Installed\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"oauth2/google: no credentials found\")\n\t}\n\tif len(c.RedirectURIs) < 1 {\n\t\treturn nil, errors.New(\"oauth2/google: missing redirect URL in the client_credentials.json\")\n\t}\n\treturn &oauth2.Config{\n\t\tClientID:     c.ClientID,\n\t\tClientSecret: c.ClientSecret,\n\t\tRedirectURL:  c.RedirectURIs[0],\n\t\tScopes:       scope,\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tAuthURL:  c.AuthURI,\n\t\t\tTokenURL: c.TokenURI,\n\t\t},\n\t}, nil\n}\n\n// JWTConfigFromJSON uses a Google Developers service account JSON key file to read\n// the credentials that authorize and authenticate the requests.\n// Create a service account on \"Credentials\" page under \"APIs & Auth\" for your\n// project at https://console.developers.google.com to download a JSON key file.\nfunc JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {\n\tvar key struct {\n\t\tEmail      string `json:\"client_email\"`\n\t\tPrivateKey string `json:\"private_key\"`\n\t}\n\tif err := json.Unmarshal(jsonKey, &key); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &jwt.Config{\n\t\tEmail:      key.Email,\n\t\tPrivateKey: []byte(key.PrivateKey),\n\t\tScopes:     scope,\n\t\tTokenURL:   
JWTTokenURL,\n\t}, nil\n}\n\n// ComputeTokenSource returns a token source that fetches access tokens\n// from Google Compute Engine (GCE)'s metadata server. It's only valid to use\n// this token source if your program is running on a GCE instance.\n// If no account is specified, \"default\" is used.\n// Further information about retrieving access tokens from the GCE metadata\n// server can be found at https://cloud.google.com/compute/docs/authentication.\nfunc ComputeTokenSource(account string) oauth2.TokenSource {\n\treturn oauth2.ReuseTokenSource(nil, computeSource{account: account})\n}\n\ntype computeSource struct {\n\taccount string\n}\n\nfunc (cs computeSource) Token() (*oauth2.Token, error) {\n\tif !metadata.OnGCE() {\n\t\treturn nil, errors.New(\"oauth2/google: can't get a token from the metadata service; not running on GCE\")\n\t}\n\tacct := cs.account\n\tif acct == \"\" {\n\t\tacct = \"default\"\n\t}\n\ttokenJSON, err := metadata.Get(\"instance/service-accounts/\" + acct + \"/token\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar res struct {\n\t\tAccessToken  string `json:\"access_token\"`\n\t\tExpiresInSec int    `json:\"expires_in\"`\n\t\tTokenType    string `json:\"token_type\"`\n\t}\n\terr = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"oauth2/google: invalid token JSON from metadata: %v\", err)\n\t}\n\tif res.ExpiresInSec == 0 || res.AccessToken == \"\" {\n\t\treturn nil, fmt.Errorf(\"oauth2/google: incomplete token received from metadata\")\n\t}\n\treturn &oauth2.Token{\n\t\tAccessToken: res.AccessToken,\n\t\tTokenType:   res.TokenType,\n\t\tExpiry:      time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),\n\t}, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/google/sdk.go",
    "content": "// Copyright 2015 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage google\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/user\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/internal\"\n)\n\ntype sdkCredentials struct {\n\tData []struct {\n\t\tCredential struct {\n\t\t\tClientID     string     `json:\"client_id\"`\n\t\t\tClientSecret string     `json:\"client_secret\"`\n\t\t\tAccessToken  string     `json:\"access_token\"`\n\t\t\tRefreshToken string     `json:\"refresh_token\"`\n\t\t\tTokenExpiry  *time.Time `json:\"token_expiry\"`\n\t\t} `json:\"credential\"`\n\t\tKey struct {\n\t\t\tAccount string `json:\"account\"`\n\t\t\tScope   string `json:\"scope\"`\n\t\t} `json:\"key\"`\n\t}\n}\n\n// An SDKConfig provides access to tokens from an account already\n// authorized via the Google Cloud SDK.\ntype SDKConfig struct {\n\tconf         oauth2.Config\n\tinitialToken *oauth2.Token\n}\n\n// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK\n// account. 
If account is empty, the account currently active in\n// Google Cloud SDK properties is used.\n// Google Cloud SDK credentials must be created by running `gcloud auth`\n// before using this function.\n// The Google Cloud SDK is available at https://cloud.google.com/sdk/.\nfunc NewSDKConfig(account string) (*SDKConfig, error) {\n\tconfigPath, err := sdkConfigPath()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"oauth2/google: error getting SDK config path: %v\", err)\n\t}\n\tcredentialsPath := filepath.Join(configPath, \"credentials\")\n\tf, err := os.Open(credentialsPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"oauth2/google: failed to load SDK credentials: %v\", err)\n\t}\n\tdefer f.Close()\n\n\tvar c sdkCredentials\n\tif err := json.NewDecoder(f).Decode(&c); err != nil {\n\t\treturn nil, fmt.Errorf(\"oauth2/google: failed to decode SDK credentials from %q: %v\", credentialsPath, err)\n\t}\n\tif len(c.Data) == 0 {\n\t\treturn nil, fmt.Errorf(\"oauth2/google: no credentials found in %q, run `gcloud auth login` to create one\", credentialsPath)\n\t}\n\tif account == \"\" {\n\t\tpropertiesPath := filepath.Join(configPath, \"properties\")\n\t\tf, err := os.Open(propertiesPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"oauth2/google: failed to load SDK properties: %v\", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tini, err := internal.ParseINI(f)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"oauth2/google: failed to parse SDK properties %q: %v\", propertiesPath, err)\n\t\t}\n\t\tcore, ok := ini[\"core\"]\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"oauth2/google: failed to find [core] section in %v\", ini)\n\t\t}\n\t\tactive, ok := core[\"account\"]\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"oauth2/google: failed to find %q attribute in %v\", \"account\", core)\n\t\t}\n\t\taccount = active\n\t}\n\n\tfor _, d := range c.Data {\n\t\tif account == \"\" || d.Key.Account == account {\n\t\t\tif d.Credential.AccessToken == \"\" && 
d.Credential.RefreshToken == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"oauth2/google: no token available for account %q\", account)\n\t\t\t}\n\t\t\tvar expiry time.Time\n\t\t\tif d.Credential.TokenExpiry != nil {\n\t\t\t\texpiry = *d.Credential.TokenExpiry\n\t\t\t}\n\t\t\treturn &SDKConfig{\n\t\t\t\tconf: oauth2.Config{\n\t\t\t\t\tClientID:     d.Credential.ClientID,\n\t\t\t\t\tClientSecret: d.Credential.ClientSecret,\n\t\t\t\t\tScopes:       strings.Split(d.Key.Scope, \" \"),\n\t\t\t\t\tEndpoint:     Endpoint,\n\t\t\t\t\tRedirectURL:  \"oob\",\n\t\t\t\t},\n\t\t\t\tinitialToken: &oauth2.Token{\n\t\t\t\t\tAccessToken:  d.Credential.AccessToken,\n\t\t\t\t\tRefreshToken: d.Credential.RefreshToken,\n\t\t\t\t\tExpiry:       expiry,\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"oauth2/google: no such credentials for account %q\", account)\n}\n\n// Client returns an HTTP client using Google Cloud SDK credentials to\n// authorize requests. The token will auto-refresh as necessary. The\n// underlying http.RoundTripper will be obtained using the provided\n// context. 
The returned client and its Transport should not be\n// modified.\nfunc (c *SDKConfig) Client(ctx context.Context) *http.Client {\n\treturn &http.Client{\n\t\tTransport: &oauth2.Transport{\n\t\t\tSource: c.TokenSource(ctx),\n\t\t},\n\t}\n}\n\n// TokenSource returns an oauth2.TokenSource that retrieve tokens from\n// Google Cloud SDK credentials using the provided context.\n// It will returns the current access token stored in the credentials,\n// and refresh it when it expires, but it won't update the credentials\n// with the new access token.\nfunc (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {\n\treturn c.conf.TokenSource(ctx, c.initialToken)\n}\n\n// Scopes are the OAuth 2.0 scopes the current account is authorized for.\nfunc (c *SDKConfig) Scopes() []string {\n\treturn c.conf.Scopes\n}\n\n// sdkConfigPath tries to guess where the gcloud config is located.\n// It can be overridden during tests.\nvar sdkConfigPath = func() (string, error) {\n\tif runtime.GOOS == \"windows\" {\n\t\treturn filepath.Join(os.Getenv(\"APPDATA\"), \"gcloud\"), nil\n\t}\n\thomeDir := guessUnixHomeDir()\n\tif homeDir == \"\" {\n\t\treturn \"\", errors.New(\"unable to get current user home directory: os/user lookup failed; $HOME is empty\")\n\t}\n\treturn filepath.Join(homeDir, \".config\", \"gcloud\"), nil\n}\n\nfunc guessUnixHomeDir() string {\n\tusr, err := user.Current()\n\tif err == nil {\n\t\treturn usr.HomeDir\n\t}\n\treturn os.Getenv(\"HOME\")\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/internal/oauth2.go",
    "content": "// Copyright 2014 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package internal contains support packages for oauth2 package.\npackage internal\n\nimport (\n\t\"bufio\"\n\t\"crypto/rsa\"\n\t\"crypto/x509\"\n\t\"encoding/pem\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n)\n\n// ParseKey converts the binary contents of a private key file\n// to an *rsa.PrivateKey. It detects whether the private key is in a\n// PEM container or not. If so, it extracts the the private key\n// from PEM container before conversion. It only supports PEM\n// containers with no passphrase.\nfunc ParseKey(key []byte) (*rsa.PrivateKey, error) {\n\tblock, _ := pem.Decode(key)\n\tif block != nil {\n\t\tkey = block.Bytes\n\t}\n\tparsedKey, err := x509.ParsePKCS8PrivateKey(key)\n\tif err != nil {\n\t\tparsedKey, err = x509.ParsePKCS1PrivateKey(key)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v\", err)\n\t\t}\n\t}\n\tparsed, ok := parsedKey.(*rsa.PrivateKey)\n\tif !ok {\n\t\treturn nil, errors.New(\"private key is invalid\")\n\t}\n\treturn parsed, nil\n}\n\nfunc ParseINI(ini io.Reader) (map[string]map[string]string, error) {\n\tresult := map[string]map[string]string{\n\t\t\"\": map[string]string{}, // root section\n\t}\n\tscanner := bufio.NewScanner(ini)\n\tcurrentSection := \"\"\n\tfor scanner.Scan() {\n\t\tline := strings.TrimSpace(scanner.Text())\n\t\tif strings.HasPrefix(line, \";\") {\n\t\t\t// comment.\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(line, \"[\") && strings.HasSuffix(line, \"]\") {\n\t\t\tcurrentSection = strings.TrimSpace(line[1 : len(line)-1])\n\t\t\tresult[currentSection] = map[string]string{}\n\t\t\tcontinue\n\t\t}\n\t\tparts := strings.SplitN(line, \"=\", 2)\n\t\tif len(parts) == 2 && parts[0] != \"\" {\n\t\t\tresult[currentSection][strings.TrimSpace(parts[0])] = 
strings.TrimSpace(parts[1])\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, fmt.Errorf(\"error scanning ini: %v\", err)\n\t}\n\treturn result, nil\n}\n\nfunc CondVal(v string) []string {\n\tif v == \"\" {\n\t\treturn nil\n\t}\n\treturn []string{v}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/internal/token.go",
    "content": "// Copyright 2014 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package internal contains support packages for oauth2 package.\npackage internal\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"mime\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n)\n\n// Token represents the crendentials used to authorize\n// the requests to access protected resources on the OAuth 2.0\n// provider's backend.\n//\n// This type is a mirror of oauth2.Token and exists to break\n// an otherwise-circular dependency. Other internal packages\n// should convert this Token into an oauth2.Token before use.\ntype Token struct {\n\t// AccessToken is the token that authorizes and authenticates\n\t// the requests.\n\tAccessToken string\n\n\t// TokenType is the type of token.\n\t// The Type method returns either this or \"Bearer\", the default.\n\tTokenType string\n\n\t// RefreshToken is a token that's used by the application\n\t// (as opposed to the user) to refresh the access token\n\t// if it expires.\n\tRefreshToken string\n\n\t// Expiry is the optional expiration time of the access token.\n\t//\n\t// If zero, TokenSource implementations will reuse the same\n\t// token forever and RefreshToken or equivalent\n\t// mechanisms for that TokenSource will not be used.\n\tExpiry time.Time\n\n\t// Raw optionally contains extra metadata from the server\n\t// when updating a token.\n\tRaw interface{}\n}\n\n// tokenJSON is the struct representing the HTTP response from OAuth2\n// providers returning a token in JSON form.\ntype tokenJSON struct {\n\tAccessToken  string         `json:\"access_token\"`\n\tTokenType    string         `json:\"token_type\"`\n\tRefreshToken string         `json:\"refresh_token\"`\n\tExpiresIn    expirationTime `json:\"expires_in\"` // at least PayPal returns string, 
while most return number\n\tExpires      expirationTime `json:\"expires\"`    // broken Facebook spelling of expires_in\n}\n\nfunc (e *tokenJSON) expiry() (t time.Time) {\n\tif v := e.ExpiresIn; v != 0 {\n\t\treturn time.Now().Add(time.Duration(v) * time.Second)\n\t}\n\tif v := e.Expires; v != 0 {\n\t\treturn time.Now().Add(time.Duration(v) * time.Second)\n\t}\n\treturn\n}\n\ntype expirationTime int32\n\nfunc (e *expirationTime) UnmarshalJSON(b []byte) error {\n\tvar n json.Number\n\terr := json.Unmarshal(b, &n)\n\tif err != nil {\n\t\treturn err\n\t}\n\ti, err := n.Int64()\n\tif err != nil {\n\t\treturn err\n\t}\n\t*e = expirationTime(i)\n\treturn nil\n}\n\nvar brokenAuthHeaderProviders = []string{\n\t\"https://accounts.google.com/\",\n\t\"https://www.googleapis.com/\",\n\t\"https://github.com/\",\n\t\"https://api.instagram.com/\",\n\t\"https://www.douban.com/\",\n\t\"https://api.dropbox.com/\",\n\t\"https://api.soundcloud.com/\",\n\t\"https://www.linkedin.com/\",\n\t\"https://api.twitch.tv/\",\n\t\"https://oauth.vk.com/\",\n\t\"https://api.odnoklassniki.ru/\",\n\t\"https://connect.stripe.com/\",\n\t\"https://api.pushbullet.com/\",\n\t\"https://oauth.sandbox.trainingpeaks.com/\",\n\t\"https://oauth.trainingpeaks.com/\",\n\t\"https://www.strava.com/oauth/\",\n\t\"https://app.box.com/\",\n\t\"https://test-sandbox.auth.corp.google.com\",\n\t\"https://user.gini.net/\",\n}\n\n// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL\n// implements the OAuth2 spec correctly\n// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.\n// In summary:\n// - Reddit only accepts client secret in the Authorization header\n// - Dropbox accepts either it in URL param or Auth header, but not both.\n// - Google only accepts URL param (not spec compliant?), not Auth header\n// - Stripe only accepts client secret in Auth header with Bearer method, not Basic\nfunc providerAuthHeaderWorks(tokenURL string) bool {\n\tfor _, s := range 
brokenAuthHeaderProviders {\n\t\tif strings.HasPrefix(tokenURL, s) {\n\t\t\t// Some sites fail to implement the OAuth2 spec fully.\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// Assume the provider implements the spec properly\n\t// otherwise. We can add more exceptions as they're\n\t// discovered. We will _not_ be adding configurable hooks\n\t// to this package to let users select server bugs.\n\treturn true\n}\n\nfunc RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) {\n\thc, err := ContextClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tv.Set(\"client_id\", ClientID)\n\tbustedAuth := !providerAuthHeaderWorks(TokenURL)\n\tif bustedAuth && ClientSecret != \"\" {\n\t\tv.Set(\"client_secret\", ClientSecret)\n\t}\n\treq, err := http.NewRequest(\"POST\", TokenURL, strings.NewReader(v.Encode()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\tif !bustedAuth {\n\t\treq.SetBasicAuth(ClientID, ClientSecret)\n\t}\n\tr, err := hc.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Body.Close()\n\tbody, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"oauth2: cannot fetch token: %v\", err)\n\t}\n\tif code := r.StatusCode; code < 200 || code > 299 {\n\t\treturn nil, fmt.Errorf(\"oauth2: cannot fetch token: %v\\nResponse: %s\", r.Status, body)\n\t}\n\n\tvar token *Token\n\tcontent, _, _ := mime.ParseMediaType(r.Header.Get(\"Content-Type\"))\n\tswitch content {\n\tcase \"application/x-www-form-urlencoded\", \"text/plain\":\n\t\tvals, err := url.ParseQuery(string(body))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttoken = &Token{\n\t\t\tAccessToken:  vals.Get(\"access_token\"),\n\t\t\tTokenType:    vals.Get(\"token_type\"),\n\t\t\tRefreshToken: vals.Get(\"refresh_token\"),\n\t\t\tRaw:          vals,\n\t\t}\n\t\te := vals.Get(\"expires_in\")\n\t\tif e == \"\" {\n\t\t\t// 
TODO(jbd): Facebook's OAuth2 implementation is broken and\n\t\t\t// returns expires_in field in expires. Remove the fallback to expires,\n\t\t\t// when Facebook fixes their implementation.\n\t\t\te = vals.Get(\"expires\")\n\t\t}\n\t\texpires, _ := strconv.Atoi(e)\n\t\tif expires != 0 {\n\t\t\ttoken.Expiry = time.Now().Add(time.Duration(expires) * time.Second)\n\t\t}\n\tdefault:\n\t\tvar tj tokenJSON\n\t\tif err = json.Unmarshal(body, &tj); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttoken = &Token{\n\t\t\tAccessToken:  tj.AccessToken,\n\t\t\tTokenType:    tj.TokenType,\n\t\t\tRefreshToken: tj.RefreshToken,\n\t\t\tExpiry:       tj.expiry(),\n\t\t\tRaw:          make(map[string]interface{}),\n\t\t}\n\t\tjson.Unmarshal(body, &token.Raw) // no error checks for optional fields\n\t}\n\t// Don't overwrite `RefreshToken` with an empty value\n\t// if this was a token refreshing request.\n\tif token.RefreshToken == \"\" {\n\t\ttoken.RefreshToken = v.Get(\"refresh_token\")\n\t}\n\treturn token, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/internal/transport.go",
    "content": "// Copyright 2014 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package internal contains support packages for oauth2 package.\npackage internal\n\nimport (\n\t\"net/http\"\n\n\t\"golang.org/x/net/context\"\n)\n\n// HTTPClient is the context key to use with golang.org/x/net/context's\n// WithValue function to associate an *http.Client value with a context.\nvar HTTPClient ContextKey\n\n// ContextKey is just an empty struct. It exists so HTTPClient can be\n// an immutable public variable with a unique type. It's immutable\n// because nobody else can create a ContextKey, being unexported.\ntype ContextKey struct{}\n\n// ContextClientFunc is a func which tries to return an *http.Client\n// given a Context value. If it returns an error, the search stops\n// with that error.  If it returns (nil, nil), the search continues\n// down the list of registered funcs.\ntype ContextClientFunc func(context.Context) (*http.Client, error)\n\nvar contextClientFuncs []ContextClientFunc\n\nfunc RegisterContextClientFunc(fn ContextClientFunc) {\n\tcontextClientFuncs = append(contextClientFuncs, fn)\n}\n\nfunc ContextClient(ctx context.Context) (*http.Client, error) {\n\tfor _, fn := range contextClientFuncs {\n\t\tc, err := fn(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif c != nil {\n\t\t\treturn c, nil\n\t\t}\n\t}\n\tif hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {\n\t\treturn hc, nil\n\t}\n\treturn http.DefaultClient, nil\n}\n\nfunc ContextTransport(ctx context.Context) http.RoundTripper {\n\thc, err := ContextClient(ctx)\n\t// This is a rare error case (somebody using nil on App Engine).\n\tif err != nil {\n\t\treturn ErrorTransport{err}\n\t}\n\treturn hc.Transport\n}\n\n// ErrorTransport returns the specified error on RoundTrip.\n// This RoundTripper should be used in rare error cases where\n// error handling can be postponed to 
response handling time.\ntype ErrorTransport struct{ Err error }\n\nfunc (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {\n\treturn nil, t.Err\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/jws/jws.go",
    "content": "// Copyright 2014 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package jws provides encoding and decoding utilities for\n// signed JWS messages.\npackage jws\n\nimport (\n\t\"bytes\"\n\t\"crypto\"\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"crypto/sha256\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n)\n\n// ClaimSet contains information about the JWT signature including the\n// permissions being requested (scopes), the target of the token, the issuer,\n// the time the token was issued, and the lifetime of the token.\ntype ClaimSet struct {\n\tIss   string `json:\"iss\"`             // email address of the client_id of the application making the access token request\n\tScope string `json:\"scope,omitempty\"` // space-delimited list of the permissions the application requests\n\tAud   string `json:\"aud\"`             // descriptor of the intended target of the assertion (Optional).\n\tExp   int64  `json:\"exp\"`             // the expiration time of the assertion\n\tIat   int64  `json:\"iat\"`             // the time the assertion was issued.\n\tTyp   string `json:\"typ,omitempty\"`   // token type (Optional).\n\n\t// Email for which the application is requesting delegated access (Optional).\n\tSub string `json:\"sub,omitempty\"`\n\n\t// The old name of Sub. Client keeps setting Prn to be\n\t// complaint with legacy OAuth 2.0 providers. 
(Optional)\n\tPrn string `json:\"prn,omitempty\"`\n\n\t// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3\n\t// This array is marshalled using custom code (see (c *ClaimSet) encode()).\n\tPrivateClaims map[string]interface{} `json:\"-\"`\n\n\texp time.Time\n\tiat time.Time\n}\n\nfunc (c *ClaimSet) encode() (string, error) {\n\tif c.exp.IsZero() || c.iat.IsZero() {\n\t\t// Reverting time back for machines whose time is not perfectly in sync.\n\t\t// If client machine's time is in the future according\n\t\t// to Google servers, an access token will not be issued.\n\t\tnow := time.Now().Add(-10 * time.Second)\n\t\tc.iat = now\n\t\tc.exp = now.Add(time.Hour)\n\t}\n\n\tc.Exp = c.exp.Unix()\n\tc.Iat = c.iat.Unix()\n\n\tb, err := json.Marshal(c)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(c.PrivateClaims) == 0 {\n\t\treturn base64Encode(b), nil\n\t}\n\n\t// Marshal private claim set and then append it to b.\n\tprv, err := json.Marshal(c.PrivateClaims)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"jws: invalid map of private claims %v\", c.PrivateClaims)\n\t}\n\n\t// Concatenate public and private claim JSON objects.\n\tif !bytes.HasSuffix(b, []byte{'}'}) {\n\t\treturn \"\", fmt.Errorf(\"jws: invalid JSON %s\", b)\n\t}\n\tif !bytes.HasPrefix(prv, []byte{'{'}) {\n\t\treturn \"\", fmt.Errorf(\"jws: invalid JSON %s\", prv)\n\t}\n\tb[len(b)-1] = ','         // Replace closing curly brace with a comma.\n\tb = append(b, prv[1:]...) 
// Append private claims.\n\treturn base64Encode(b), nil\n}\n\n// Header represents the header for the signed JWS payloads.\ntype Header struct {\n\t// The algorithm used for signature.\n\tAlgorithm string `json:\"alg\"`\n\n\t// Represents the token type.\n\tTyp string `json:\"typ\"`\n}\n\nfunc (h *Header) encode() (string, error) {\n\tb, err := json.Marshal(h)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn base64Encode(b), nil\n}\n\n// Decode decodes a claim set from a JWS payload.\nfunc Decode(payload string) (*ClaimSet, error) {\n\t// decode returned id token to get expiry\n\ts := strings.Split(payload, \".\")\n\tif len(s) < 2 {\n\t\t// TODO(jbd): Provide more context about the error.\n\t\treturn nil, errors.New(\"jws: invalid token received\")\n\t}\n\tdecoded, err := base64Decode(s[1])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := &ClaimSet{}\n\terr = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c)\n\treturn c, err\n}\n\n// Encode encodes a signed JWS with provided header and claim set.\nfunc Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, error) {\n\thead, err := header.encode()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tcs, err := c.encode()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tss := fmt.Sprintf(\"%s.%s\", head, cs)\n\th := sha256.New()\n\th.Write([]byte(ss))\n\tb, err := rsa.SignPKCS1v15(rand.Reader, signature, crypto.SHA256, h.Sum(nil))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tsig := base64Encode(b)\n\treturn fmt.Sprintf(\"%s.%s\", ss, sig), nil\n}\n\n// base64Encode returns and Base64url encoded version of the input string with any\n// trailing \"=\" stripped.\nfunc base64Encode(b []byte) string {\n\treturn strings.TrimRight(base64.URLEncoding.EncodeToString(b), \"=\")\n}\n\n// base64Decode decodes the Base64url encoded string\nfunc base64Decode(s string) ([]byte, error) {\n\t// add back missing padding\n\tswitch len(s) % 4 {\n\tcase 2:\n\t\ts += \"==\"\n\tcase 3:\n\t\ts += 
\"=\"\n\t}\n\treturn base64.URLEncoding.DecodeString(s)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/jwt/jwt.go",
    "content": "// Copyright 2014 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly\n// known as \"two-legged OAuth 2.0\".\n//\n// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12\npackage jwt\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/internal\"\n\t\"golang.org/x/oauth2/jws\"\n)\n\nvar (\n\tdefaultGrantType = \"urn:ietf:params:oauth:grant-type:jwt-bearer\"\n\tdefaultHeader    = &jws.Header{Algorithm: \"RS256\", Typ: \"JWT\"}\n)\n\n// Config is the configuration for using JWT to fetch tokens,\n// commonly known as \"two-legged OAuth 2.0\".\ntype Config struct {\n\t// Email is the OAuth client identifier used when communicating with\n\t// the configured OAuth provider.\n\tEmail string\n\n\t// PrivateKey contains the contents of an RSA private key or the\n\t// contents of a PEM file that contains a private key. 
The provided\n\t// private key is used to sign JWT payloads.\n\t// PEM containers with a passphrase are not supported.\n\t// Use the following command to convert a PKCS 12 file into a PEM.\n\t//\n\t//    $ openssl pkcs12 -in key.p12 -out key.pem -nodes\n\t//\n\tPrivateKey []byte\n\n\t// Subject is the optional user to impersonate.\n\tSubject string\n\n\t// Scopes optionally specifies a list of requested permission scopes.\n\tScopes []string\n\n\t// TokenURL is the endpoint required to complete the 2-legged JWT flow.\n\tTokenURL string\n}\n\n// TokenSource returns a JWT TokenSource using the configuration\n// in c and the HTTP client from the provided context.\nfunc (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {\n\treturn oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})\n}\n\n// Client returns an HTTP client wrapping the context's\n// HTTP transport and adding Authorization headers with tokens\n// obtained from c.\n//\n// The returned client and its Transport should not be modified.\nfunc (c *Config) Client(ctx context.Context) *http.Client {\n\treturn oauth2.NewClient(ctx, c.TokenSource(ctx))\n}\n\n// jwtSource is a source that always does a signed JWT request for a token.\n// It should typically be wrapped with a reuseTokenSource.\ntype jwtSource struct {\n\tctx  context.Context\n\tconf *Config\n}\n\nfunc (js jwtSource) Token() (*oauth2.Token, error) {\n\tpk, err := internal.ParseKey(js.conf.PrivateKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thc := oauth2.NewClient(js.ctx, nil)\n\tclaimSet := &jws.ClaimSet{\n\t\tIss:   js.conf.Email,\n\t\tScope: strings.Join(js.conf.Scopes, \" \"),\n\t\tAud:   js.conf.TokenURL,\n\t}\n\tif subject := js.conf.Subject; subject != \"\" {\n\t\tclaimSet.Sub = subject\n\t\t// prn is the old name of sub. 
Keep setting it\n\t\t// to be compatible with legacy OAuth 2.0 providers.\n\t\tclaimSet.Prn = subject\n\t}\n\tpayload, err := jws.Encode(defaultHeader, claimSet, pk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tv := url.Values{}\n\tv.Set(\"grant_type\", defaultGrantType)\n\tv.Set(\"assertion\", payload)\n\tresp, err := hc.PostForm(js.conf.TokenURL, v)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"oauth2: cannot fetch token: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"oauth2: cannot fetch token: %v\", err)\n\t}\n\tif c := resp.StatusCode; c < 200 || c > 299 {\n\t\treturn nil, fmt.Errorf(\"oauth2: cannot fetch token: %v\\nResponse: %s\", resp.Status, body)\n\t}\n\t// tokenRes is the JSON response body.\n\tvar tokenRes struct {\n\t\tAccessToken string `json:\"access_token\"`\n\t\tTokenType   string `json:\"token_type\"`\n\t\tIDToken     string `json:\"id_token\"`\n\t\tExpiresIn   int64  `json:\"expires_in\"` // relative seconds from now\n\t}\n\tif err := json.Unmarshal(body, &tokenRes); err != nil {\n\t\treturn nil, fmt.Errorf(\"oauth2: cannot fetch token: %v\", err)\n\t}\n\ttoken := &oauth2.Token{\n\t\tAccessToken: tokenRes.AccessToken,\n\t\tTokenType:   tokenRes.TokenType,\n\t}\n\traw := make(map[string]interface{})\n\tjson.Unmarshal(body, &raw) // no error checks for optional fields\n\ttoken = token.WithExtra(raw)\n\n\tif secs := tokenRes.ExpiresIn; secs > 0 {\n\t\ttoken.Expiry = time.Now().Add(time.Duration(secs) * time.Second)\n\t}\n\tif v := tokenRes.IDToken; v != \"\" {\n\t\t// decode returned id token to get expiry\n\t\tclaimSet, err := jws.Decode(v)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"oauth2: error decoding JWT token: %v\", err)\n\t\t}\n\t\ttoken.Expiry = time.Unix(claimSet.Exp, 0)\n\t}\n\treturn token, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/linkedin/linkedin.go",
    "content": "// Copyright 2015 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package linkedin provides constants for using OAuth2 to access LinkedIn.\npackage linkedin\n\nimport (\n\t\"golang.org/x/oauth2\"\n)\n\n// Endpoint is LinkedIn's OAuth 2.0 endpoint.\nvar Endpoint = oauth2.Endpoint{\n\tAuthURL:  \"https://www.linkedin.com/uas/oauth2/authorization\",\n\tTokenURL: \"https://www.linkedin.com/uas/oauth2/accessToken\",\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/oauth2.go",
    "content": "// Copyright 2014 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package oauth2 provides support for making\n// OAuth2 authorized and authenticated HTTP requests.\n// It can additionally grant authorization with Bearer JWT.\npackage oauth2\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2/internal\"\n)\n\n// NoContext is the default context you should supply if not using\n// your own context.Context (see https://golang.org/x/net/context).\nvar NoContext = context.TODO()\n\n// Config describes a typical 3-legged OAuth2 flow, with both the\n// client application information and the server's endpoint URLs.\ntype Config struct {\n\t// ClientID is the application's ID.\n\tClientID string\n\n\t// ClientSecret is the application's secret.\n\tClientSecret string\n\n\t// Endpoint contains the resource server's token endpoint\n\t// URLs. 
These are constants specific to each server and are\n\t// often available via site-specific packages, such as\n\t// google.Endpoint or github.Endpoint.\n\tEndpoint Endpoint\n\n\t// RedirectURL is the URL to redirect users going through\n\t// the OAuth flow, after the resource owner's URLs.\n\tRedirectURL string\n\n\t// Scope specifies optional requested permissions.\n\tScopes []string\n}\n\n// A TokenSource is anything that can return a token.\ntype TokenSource interface {\n\t// Token returns a token or an error.\n\t// Token must be safe for concurrent use by multiple goroutines.\n\t// The returned Token must not be modified.\n\tToken() (*Token, error)\n}\n\n// Endpoint contains the OAuth 2.0 provider's authorization and token\n// endpoint URLs.\ntype Endpoint struct {\n\tAuthURL  string\n\tTokenURL string\n}\n\nvar (\n\t// AccessTypeOnline and AccessTypeOffline are options passed\n\t// to the Options.AuthCodeURL method. They modify the\n\t// \"access_type\" field that gets sent in the URL returned by\n\t// AuthCodeURL.\n\t//\n\t// Online is the default if neither is specified. If your\n\t// application needs to refresh access tokens when the user\n\t// is not present at the browser, then use offline. 
This will\n\t// result in your application obtaining a refresh token the\n\t// first time your application exchanges an authorization\n\t// code for a user.\n\tAccessTypeOnline  AuthCodeOption = SetAuthURLParam(\"access_type\", \"online\")\n\tAccessTypeOffline AuthCodeOption = SetAuthURLParam(\"access_type\", \"offline\")\n\n\t// ApprovalForce forces the users to view the consent dialog\n\t// and confirm the permissions request at the URL returned\n\t// from AuthCodeURL, even if they've already done so.\n\tApprovalForce AuthCodeOption = SetAuthURLParam(\"approval_prompt\", \"force\")\n)\n\n// An AuthCodeOption is passed to Config.AuthCodeURL.\ntype AuthCodeOption interface {\n\tsetValue(url.Values)\n}\n\ntype setParam struct{ k, v string }\n\nfunc (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }\n\n// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters\n// to a provider's authorization endpoint.\nfunc SetAuthURLParam(key, value string) AuthCodeOption {\n\treturn setParam{key, value}\n}\n\n// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page\n// that asks for permissions for the required scopes explicitly.\n//\n// State is a token to protect the user from CSRF attacks. 
You must\n// always provide a non-zero string and validate that it matches the\n// the state query parameter on your redirect callback.\n// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.\n//\n// Opts may include AccessTypeOnline or AccessTypeOffline, as well\n// as ApprovalForce.\nfunc (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {\n\tvar buf bytes.Buffer\n\tbuf.WriteString(c.Endpoint.AuthURL)\n\tv := url.Values{\n\t\t\"response_type\": {\"code\"},\n\t\t\"client_id\":     {c.ClientID},\n\t\t\"redirect_uri\":  internal.CondVal(c.RedirectURL),\n\t\t\"scope\":         internal.CondVal(strings.Join(c.Scopes, \" \")),\n\t\t\"state\":         internal.CondVal(state),\n\t}\n\tfor _, opt := range opts {\n\t\topt.setValue(v)\n\t}\n\tif strings.Contains(c.Endpoint.AuthURL, \"?\") {\n\t\tbuf.WriteByte('&')\n\t} else {\n\t\tbuf.WriteByte('?')\n\t}\n\tbuf.WriteString(v.Encode())\n\treturn buf.String()\n}\n\n// PasswordCredentialsToken converts a resource owner username and password\n// pair into a token.\n//\n// Per the RFC, this grant type should only be used \"when there is a high\n// degree of trust between the resource owner and the client (e.g., the client\n// is part of the device operating system or a highly privileged application),\n// and when other authorization grant types are not available.\"\n// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.\n//\n// The HTTP client to use is derived from the context.\n// If nil, http.DefaultClient is used.\nfunc (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {\n\treturn retrieveToken(ctx, c, url.Values{\n\t\t\"grant_type\": {\"password\"},\n\t\t\"username\":   {username},\n\t\t\"password\":   {password},\n\t\t\"scope\":      internal.CondVal(strings.Join(c.Scopes, \" \")),\n\t})\n}\n\n// Exchange converts an authorization code into a token.\n//\n// It is used after a resource provider redirects the user back\n// to 
the Redirect URI (the URL obtained from AuthCodeURL).\n//\n// The HTTP client to use is derived from the context.\n// If a client is not provided via the context, http.DefaultClient is used.\n//\n// The code will be in the *http.Request.FormValue(\"code\"). Before\n// calling Exchange, be sure to validate FormValue(\"state\").\nfunc (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {\n\treturn retrieveToken(ctx, c, url.Values{\n\t\t\"grant_type\":   {\"authorization_code\"},\n\t\t\"code\":         {code},\n\t\t\"redirect_uri\": internal.CondVal(c.RedirectURL),\n\t\t\"scope\":        internal.CondVal(strings.Join(c.Scopes, \" \")),\n\t})\n}\n\n// Client returns an HTTP client using the provided token.\n// The token will auto-refresh as necessary. The underlying\n// HTTP transport will be obtained using the provided context.\n// The returned client and its Transport should not be modified.\nfunc (c *Config) Client(ctx context.Context, t *Token) *http.Client {\n\treturn NewClient(ctx, c.TokenSource(ctx, t))\n}\n\n// TokenSource returns a TokenSource that returns t until t expires,\n// automatically refreshing it as necessary using the provided context.\n//\n// Most users will use Config.Client instead.\nfunc (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {\n\ttkr := &tokenRefresher{\n\t\tctx:  ctx,\n\t\tconf: c,\n\t}\n\tif t != nil {\n\t\ttkr.refreshToken = t.RefreshToken\n\t}\n\treturn &reuseTokenSource{\n\t\tt:   t,\n\t\tnew: tkr,\n\t}\n}\n\n// tokenRefresher is a TokenSource that makes \"grant_type\"==\"refresh_token\"\n// HTTP requests to renew a token using a RefreshToken.\ntype tokenRefresher struct {\n\tctx          context.Context // used to get HTTP requests\n\tconf         *Config\n\trefreshToken string\n}\n\n// WARNING: Token is not safe for concurrent access, as it\n// updates the tokenRefresher's refreshToken field.\n// Within this package, it is used by reuseTokenSource which\n// synchronizes calls to this method 
with its own mutex.\nfunc (tf *tokenRefresher) Token() (*Token, error) {\n\tif tf.refreshToken == \"\" {\n\t\treturn nil, errors.New(\"oauth2: token expired and refresh token is not set\")\n\t}\n\n\ttk, err := retrieveToken(tf.ctx, tf.conf, url.Values{\n\t\t\"grant_type\":    {\"refresh_token\"},\n\t\t\"refresh_token\": {tf.refreshToken},\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif tf.refreshToken != tk.RefreshToken {\n\t\ttf.refreshToken = tk.RefreshToken\n\t}\n\treturn tk, err\n}\n\n// reuseTokenSource is a TokenSource that holds a single token in memory\n// and validates its expiry before each call to retrieve it with\n// Token. If it's expired, it will be auto-refreshed using the\n// new TokenSource.\ntype reuseTokenSource struct {\n\tnew TokenSource // called when t is expired.\n\n\tmu sync.Mutex // guards t\n\tt  *Token\n}\n\n// Token returns the current token if it's still valid, else will\n// refresh the current token (using r.Context for HTTP client\n// information) and return the new one.\nfunc (s *reuseTokenSource) Token() (*Token, error) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif s.t.Valid() {\n\t\treturn s.t, nil\n\t}\n\tt, err := s.new.Token()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.t = t\n\treturn t, nil\n}\n\n// StaticTokenSource returns a TokenSource that always returns the same token.\n// Because the provided token t is never refreshed, StaticTokenSource is only\n// useful for tokens that never expire.\nfunc StaticTokenSource(t *Token) TokenSource {\n\treturn staticTokenSource{t}\n}\n\n// staticTokenSource is a TokenSource that always returns the same Token.\ntype staticTokenSource struct {\n\tt *Token\n}\n\nfunc (s staticTokenSource) Token() (*Token, error) {\n\treturn s.t, nil\n}\n\n// HTTPClient is the context key to use with golang.org/x/net/context's\n// WithValue function to associate an *http.Client value with a context.\nvar HTTPClient internal.ContextKey\n\n// NewClient creates an *http.Client from a Context 
and TokenSource.\n// The returned client is not valid beyond the lifetime of the context.\n//\n// As a special case, if src is nil, a non-OAuth2 client is returned\n// using the provided context. This exists to support related OAuth2\n// packages.\nfunc NewClient(ctx context.Context, src TokenSource) *http.Client {\n\tif src == nil {\n\t\tc, err := internal.ContextClient(ctx)\n\t\tif err != nil {\n\t\t\treturn &http.Client{Transport: internal.ErrorTransport{err}}\n\t\t}\n\t\treturn c\n\t}\n\treturn &http.Client{\n\t\tTransport: &Transport{\n\t\t\tBase:   internal.ContextTransport(ctx),\n\t\t\tSource: ReuseTokenSource(nil, src),\n\t\t},\n\t}\n}\n\n// ReuseTokenSource returns a TokenSource which repeatedly returns the\n// same token as long as it's valid, starting with t.\n// When its cached token is invalid, a new token is obtained from src.\n//\n// ReuseTokenSource is typically used to reuse tokens from a cache\n// (such as a file on disk) between runs of a program, rather than\n// obtaining new tokens unnecessarily.\n//\n// The initial token t may be nil, in which case the TokenSource is\n// wrapped in a caching version if it isn't one already. This also\n// means it's always safe to wrap ReuseTokenSource around any other\n// TokenSource without adverse effects.\nfunc ReuseTokenSource(t *Token, src TokenSource) TokenSource {\n\t// Don't wrap a reuseTokenSource in itself. That would work,\n\t// but cause an unnecessary number of mutex operations.\n\t// Just build the equivalent one.\n\tif rt, ok := src.(*reuseTokenSource); ok {\n\t\tif t == nil {\n\t\t\t// Just use it directly.\n\t\t\treturn rt\n\t\t}\n\t\tsrc = rt.new\n\t}\n\treturn &reuseTokenSource{\n\t\tt:   t,\n\t\tnew: src,\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go",
    "content": "// Copyright 2015 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki.\npackage odnoklassniki\n\nimport (\n\t\"golang.org/x/oauth2\"\n)\n\n// Endpoint is Odnoklassniki's OAuth 2.0 endpoint.\nvar Endpoint = oauth2.Endpoint{\n\tAuthURL:  \"https://www.odnoklassniki.ru/oauth/authorize\",\n\tTokenURL: \"https://api.odnoklassniki.ru/oauth/token.do\",\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/paypal/paypal.go",
    "content": "// Copyright 2015 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package paypal provides constants for using OAuth2 to access PayPal.\npackage paypal\n\nimport (\n\t\"golang.org/x/oauth2\"\n)\n\n// Endpoint is PayPal's OAuth 2.0 endpoint in live (production) environment.\nvar Endpoint = oauth2.Endpoint{\n\tAuthURL:  \"https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize\",\n\tTokenURL: \"https://api.paypal.com/v1/identity/openidconnect/tokenservice\",\n}\n\n// SandboxEndpoint is PayPal's OAuth 2.0 endpoint in sandbox (testing) environment.\nvar SandboxEndpoint = oauth2.Endpoint{\n\tAuthURL:  \"https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize\",\n\tTokenURL: \"https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice\",\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/token.go",
    "content": "// Copyright 2014 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage oauth2\n\nimport (\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2/internal\"\n)\n\n// expiryDelta determines how earlier a token should be considered\n// expired than its actual expiration time. It is used to avoid late\n// expirations due to client-server time mismatches.\nconst expiryDelta = 10 * time.Second\n\n// Token represents the crendentials used to authorize\n// the requests to access protected resources on the OAuth 2.0\n// provider's backend.\n//\n// Most users of this package should not access fields of Token\n// directly. They're exported mostly for use by related packages\n// implementing derivative OAuth2 flows.\ntype Token struct {\n\t// AccessToken is the token that authorizes and authenticates\n\t// the requests.\n\tAccessToken string `json:\"access_token\"`\n\n\t// TokenType is the type of token.\n\t// The Type method returns either this or \"Bearer\", the default.\n\tTokenType string `json:\"token_type,omitempty\"`\n\n\t// RefreshToken is a token that's used by the application\n\t// (as opposed to the user) to refresh the access token\n\t// if it expires.\n\tRefreshToken string `json:\"refresh_token,omitempty\"`\n\n\t// Expiry is the optional expiration time of the access token.\n\t//\n\t// If zero, TokenSource implementations will reuse the same\n\t// token forever and RefreshToken or equivalent\n\t// mechanisms for that TokenSource will not be used.\n\tExpiry time.Time `json:\"expiry,omitempty\"`\n\n\t// raw optionally contains extra metadata from the server\n\t// when updating a token.\n\traw interface{}\n}\n\n// Type returns t.TokenType if non-empty, else \"Bearer\".\nfunc (t *Token) Type() string {\n\tif strings.EqualFold(t.TokenType, \"bearer\") {\n\t\treturn \"Bearer\"\n\t}\n\tif 
strings.EqualFold(t.TokenType, \"mac\") {\n\t\treturn \"MAC\"\n\t}\n\tif strings.EqualFold(t.TokenType, \"basic\") {\n\t\treturn \"Basic\"\n\t}\n\tif t.TokenType != \"\" {\n\t\treturn t.TokenType\n\t}\n\treturn \"Bearer\"\n}\n\n// SetAuthHeader sets the Authorization header to r using the access\n// token in t.\n//\n// This method is unnecessary when using Transport or an HTTP Client\n// returned by this package.\nfunc (t *Token) SetAuthHeader(r *http.Request) {\n\tr.Header.Set(\"Authorization\", t.Type()+\" \"+t.AccessToken)\n}\n\n// WithExtra returns a new Token that's a clone of t, but using the\n// provided raw extra map. This is only intended for use by packages\n// implementing derivative OAuth2 flows.\nfunc (t *Token) WithExtra(extra interface{}) *Token {\n\tt2 := new(Token)\n\t*t2 = *t\n\tt2.raw = extra\n\treturn t2\n}\n\n// Extra returns an extra field.\n// Extra fields are key-value pairs returned by the server as a\n// part of the token retrieval response.\nfunc (t *Token) Extra(key string) interface{} {\n\tif vals, ok := t.raw.(url.Values); ok {\n\t\t// TODO(jbd): Cast numeric values to int64 or float64.\n\t\treturn vals.Get(key)\n\t}\n\tif raw, ok := t.raw.(map[string]interface{}); ok {\n\t\treturn raw[key]\n\t}\n\treturn nil\n}\n\n// expired reports whether the token is expired.\n// t must be non-nil.\nfunc (t *Token) expired() bool {\n\tif t.Expiry.IsZero() {\n\t\treturn false\n\t}\n\treturn t.Expiry.Add(-expiryDelta).Before(time.Now())\n}\n\n// Valid reports whether t is non-nil, has an AccessToken, and is not expired.\nfunc (t *Token) Valid() bool {\n\treturn t != nil && t.AccessToken != \"\" && !t.expired()\n}\n\n// tokenFromInternal maps an *internal.Token struct into\n// a *Token struct.\nfunc tokenFromInternal(t *internal.Token) *Token {\n\tif t == nil {\n\t\treturn nil\n\t}\n\treturn &Token{\n\t\tAccessToken:  t.AccessToken,\n\t\tTokenType:    t.TokenType,\n\t\tRefreshToken: t.RefreshToken,\n\t\tExpiry:       t.Expiry,\n\t\traw:          
t.Raw,\n\t}\n}\n\n// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.\n// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along\n// with an error..\nfunc retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {\n\ttk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn tokenFromInternal(tk), nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/transport.go",
    "content": "// Copyright 2014 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage oauth2\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"net/http\"\n\t\"sync\"\n)\n\n// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,\n// wrapping a base RoundTripper and adding an Authorization header\n// with a token from the supplied Sources.\n//\n// Transport is a low-level mechanism. Most code will use the\n// higher-level Config.Client method instead.\ntype Transport struct {\n\t// Source supplies the token to add to outgoing requests'\n\t// Authorization headers.\n\tSource TokenSource\n\n\t// Base is the base RoundTripper used to make HTTP requests.\n\t// If nil, http.DefaultTransport is used.\n\tBase http.RoundTripper\n\n\tmu     sync.Mutex                      // guards modReq\n\tmodReq map[*http.Request]*http.Request // original -> modified\n}\n\n// RoundTrip authorizes and authenticates the request with an\n// access token. 
If no token exists or token is expired,\n// tries to refresh/fetch a new token.\nfunc (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {\n\tif t.Source == nil {\n\t\treturn nil, errors.New(\"oauth2: Transport's Source is nil\")\n\t}\n\ttoken, err := t.Source.Token()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq2 := cloneRequest(req) // per RoundTripper contract\n\ttoken.SetAuthHeader(req2)\n\tt.setModReq(req, req2)\n\tres, err := t.base().RoundTrip(req2)\n\tif err != nil {\n\t\tt.setModReq(req, nil)\n\t\treturn nil, err\n\t}\n\tres.Body = &onEOFReader{\n\t\trc: res.Body,\n\t\tfn: func() { t.setModReq(req, nil) },\n\t}\n\treturn res, nil\n}\n\n// CancelRequest cancels an in-flight request by closing its connection.\nfunc (t *Transport) CancelRequest(req *http.Request) {\n\ttype canceler interface {\n\t\tCancelRequest(*http.Request)\n\t}\n\tif cr, ok := t.base().(canceler); ok {\n\t\tt.mu.Lock()\n\t\tmodReq := t.modReq[req]\n\t\tdelete(t.modReq, req)\n\t\tt.mu.Unlock()\n\t\tcr.CancelRequest(modReq)\n\t}\n}\n\nfunc (t *Transport) base() http.RoundTripper {\n\tif t.Base != nil {\n\t\treturn t.Base\n\t}\n\treturn http.DefaultTransport\n}\n\nfunc (t *Transport) setModReq(orig, mod *http.Request) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tif t.modReq == nil {\n\t\tt.modReq = make(map[*http.Request]*http.Request)\n\t}\n\tif mod == nil {\n\t\tdelete(t.modReq, orig)\n\t} else {\n\t\tt.modReq[orig] = mod\n\t}\n}\n\n// cloneRequest returns a clone of the provided *http.Request.\n// The clone is a shallow copy of the struct and its Header map.\nfunc cloneRequest(r *http.Request) *http.Request {\n\t// shallow copy of the struct\n\tr2 := new(http.Request)\n\t*r2 = *r\n\t// deep copy of the Header\n\tr2.Header = make(http.Header, len(r.Header))\n\tfor k, s := range r.Header {\n\t\tr2.Header[k] = append([]string(nil), s...)\n\t}\n\treturn r2\n}\n\ntype onEOFReader struct {\n\trc io.ReadCloser\n\tfn func()\n}\n\nfunc (r *onEOFReader) Read(p []byte) (n int, 
err error) {\n\tn, err = r.rc.Read(p)\n\tif err == io.EOF {\n\t\tr.runFunc()\n\t}\n\treturn\n}\n\nfunc (r *onEOFReader) Close() error {\n\terr := r.rc.Close()\n\tr.runFunc()\n\treturn err\n}\n\nfunc (r *onEOFReader) runFunc() {\n\tif fn := r.fn; fn != nil {\n\t\tfn()\n\t\tr.fn = nil\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/oauth2/vk/vk.go",
    "content": "// Copyright 2015 The oauth2 Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package vk provides constants for using OAuth2 to access VK.com.\npackage vk\n\nimport (\n\t\"golang.org/x/oauth2\"\n)\n\n// Endpoint is VK's OAuth 2.0 endpoint.\nvar Endpoint = oauth2.Endpoint{\n\tAuthURL:  \"https://oauth.vk.com/authorize\",\n\tTokenURL: \"https://oauth.vk.com/access_token\",\n}\n"
  },
  {
    "path": "vendor/google.golang.org/api/LICENSE",
    "content": "Copyright (c) 2011 Google Inc. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n   * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n   * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n   * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "vendor/google.golang.org/api/bigquery/v2/bigquery-api.json",
    "content": "{\n \"kind\": \"discovery#restDescription\",\n \"etag\": \"\\\"ye6orv2F-1npMW3u9suM3a7C5Bo/n2LVhGPabQO3DmbKxkomJprJEEo\\\"\",\n \"discoveryVersion\": \"v1\",\n \"id\": \"bigquery:v2\",\n \"name\": \"bigquery\",\n \"version\": \"v2\",\n \"revision\": \"20141112\",\n \"title\": \"BigQuery API\",\n \"description\": \"A data platform for customers to create, manage, share and query data.\",\n \"ownerDomain\": \"google.com\",\n \"ownerName\": \"Google\",\n \"icons\": {\n  \"x16\": \"https://www.google.com/images/icons/product/search-16.gif\",\n  \"x32\": \"https://www.google.com/images/icons/product/search-32.gif\"\n },\n \"documentationLink\": \"https://cloud.google.com/bigquery/\",\n \"protocol\": \"rest\",\n \"baseUrl\": \"https://www.googleapis.com/bigquery/v2/\",\n \"basePath\": \"/bigquery/v2/\",\n \"rootUrl\": \"https://www.googleapis.com/\",\n \"servicePath\": \"bigquery/v2/\",\n \"batchPath\": \"batch\",\n \"parameters\": {\n  \"alt\": {\n   \"type\": \"string\",\n   \"description\": \"Data format for the response.\",\n   \"default\": \"json\",\n   \"enum\": [\n    \"csv\",\n    \"json\"\n   ],\n   \"enumDescriptions\": [\n    \"Responses with Content-Type of text/csv\",\n    \"Responses with Content-Type of application/json\"\n   ],\n   \"location\": \"query\"\n  },\n  \"fields\": {\n   \"type\": \"string\",\n   \"description\": \"Selector specifying which fields to include in a partial response.\",\n   \"location\": \"query\"\n  },\n  \"key\": {\n   \"type\": \"string\",\n   \"description\": \"API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.\",\n   \"location\": \"query\"\n  },\n  \"oauth_token\": {\n   \"type\": \"string\",\n   \"description\": \"OAuth 2.0 token for the current user.\",\n   \"location\": \"query\"\n  },\n  \"prettyPrint\": {\n   \"type\": \"boolean\",\n   \"description\": \"Returns response with indentations and line breaks.\",\n   \"default\": \"true\",\n   \"location\": \"query\"\n  },\n  \"quotaUser\": {\n   \"type\": \"string\",\n   \"description\": \"Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.\",\n   \"location\": \"query\"\n  },\n  \"userIp\": {\n   \"type\": \"string\",\n   \"description\": \"IP address of the site where the request originates. Use this if you want to enforce per-user limits.\",\n   \"location\": \"query\"\n  }\n },\n \"auth\": {\n  \"oauth2\": {\n   \"scopes\": {\n    \"https://www.googleapis.com/auth/bigquery\": {\n     \"description\": \"View and manage your data in Google BigQuery\"\n    },\n    \"https://www.googleapis.com/auth/bigquery.insertdata\": {\n     \"description\": \"Insert data into Google BigQuery\"\n    },\n    \"https://www.googleapis.com/auth/cloud-platform\": {\n     \"description\": \"View and manage your data across Google Cloud Platform services\"\n    },\n    \"https://www.googleapis.com/auth/devstorage.full_control\": {\n     \"description\": \"Manage your data and permissions in Google Cloud Storage\"\n    },\n    \"https://www.googleapis.com/auth/devstorage.read_only\": {\n     \"description\": \"View your data in Google Cloud Storage\"\n    },\n    \"https://www.googleapis.com/auth/devstorage.read_write\": {\n     \"description\": \"Manage your data in Google Cloud Storage\"\n    }\n   }\n  }\n },\n \"schemas\": {\n  \"CsvOptions\": {\n   \"id\": \"CsvOptions\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"allowJaggedRows\": {\n     
\"type\": \"boolean\",\n     \"description\": \"[Optional] Indicates if BigQuery should accept rows that are missing trailing optional columns. If true, BigQuery treats missing trailing columns as null values. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.\"\n    },\n    \"allowQuotedNewlines\": {\n     \"type\": \"boolean\",\n     \"description\": \"[Optional] Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.\"\n    },\n    \"encoding\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.\"\n    },\n    \"fieldDelimiter\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \\\"\\\\t\\\" to specify a tab separator. The default value is a comma (',').\"\n    },\n    \"quote\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('\\\"'). If your data does not contain quoted sections, set the property value to an empty string. 
If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.\"\n    },\n    \"skipLeadingRows\": {\n     \"type\": \"integer\",\n     \"description\": \"[Optional] The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.\",\n     \"format\": \"int32\"\n    }\n   }\n  },\n  \"Dataset\": {\n   \"id\": \"Dataset\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"access\": {\n     \"type\": \"array\",\n     \"description\": \"[Optional] An array of objects that define dataset access for one or more entities. You can set this property when inserting or updating a dataset in order to control who is allowed to access the data. If unspecified at dataset creation time, BigQuery adds default dataset access for the following entities: access.specialGroup: projectReaders; access.role: READER; access.specialGroup: projectWriters; access.role: WRITER; access.specialGroup: projectOwners; access.role: OWNER; access.userByEmail: [dataset creator email]; access.role: OWNER;\",\n     \"items\": {\n      \"type\": \"object\",\n      \"properties\": {\n       \"domain\": {\n        \"type\": \"string\",\n        \"description\": \"[Pick one] A domain to grant access to. Any users signed in with the domain specified will be granted the specified access. Example: \\\"example.com\\\".\"\n       },\n       \"groupByEmail\": {\n        \"type\": \"string\",\n        \"description\": \"[Pick one] An email address of a Google Group to grant access to.\"\n       },\n       \"role\": {\n        \"type\": \"string\",\n        \"description\": \"[Required] Describes the rights granted to the user specified by the other member of the access object. 
The following string values are supported: READER, WRITER, OWNER.\"\n       },\n       \"specialGroup\": {\n        \"type\": \"string\",\n        \"description\": \"[Pick one] A special group to grant access to. Possible values include: projectOwners: Owners of the enclosing project. projectReaders: Readers of the enclosing project. projectWriters: Writers of the enclosing project. allAuthenticatedUsers: All authenticated BigQuery users.\"\n       },\n       \"userByEmail\": {\n        \"type\": \"string\",\n        \"description\": \"[Pick one] An email address of a user to grant access to. For example: fred@example.com.\"\n       },\n       \"view\": {\n        \"$ref\": \"TableReference\",\n        \"description\": \"[Pick one] A view from a different dataset to grant access to. Queries executed against that view will have read access to tables in this dataset. The role field is not required when this field is set. If that view is updated by any user, access to the view needs to be granted again via an update operation.\"\n       }\n      }\n     }\n    },\n    \"creationTime\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] The time when this dataset was created, in milliseconds since the epoch.\",\n     \"format\": \"int64\"\n    },\n    \"datasetReference\": {\n     \"$ref\": \"DatasetReference\",\n     \"description\": \"[Required] A reference that identifies the dataset.\"\n    },\n    \"defaultTableExpirationMs\": {\n     \"type\": \"string\",\n     \"description\": \"[Experimental] The default lifetime of all tables in the dataset, in milliseconds. The minimum value is 3600000 milliseconds (one hour). Once this property is set, all newly-created tables in the dataset will have an expirationTime property set to the creation time plus the value in this property, and changing the value will only affect new tables, not existing ones. When the expirationTime for a given table is reached, that table will be deleted automatically. 
If a table's expirationTime is modified or removed before the table expires, or if you provide an explicit expirationTime when creating a table, that value takes precedence over the default expiration time indicated by this property.\",\n     \"format\": \"int64\"\n    },\n    \"description\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] A user-friendly description of the dataset.\"\n    },\n    \"etag\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] A hash of the resource.\"\n    },\n    \"friendlyName\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] A descriptive name for the dataset.\"\n    },\n    \"id\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] The fully-qualified unique name of the dataset in the format projectId:datasetId. The dataset name without the project name is given in the datasetId field. When creating a new dataset, leave this field blank, and instead specify the datasetId field.\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] The resource type.\",\n     \"default\": \"bigquery#dataset\"\n    },\n    \"lastModifiedTime\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] The date when this dataset or any of its tables was last modified, in milliseconds since the epoch.\",\n     \"format\": \"int64\"\n    },\n    \"location\": {\n     \"type\": \"string\",\n     \"description\": \"[Experimental] The location where the data resides. If not present, the data will be stored in the US.\"\n    },\n    \"selfLink\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] A URL that can be used to access the resource again. 
You can use this URL in Get or Update requests to the resource.\"\n    }\n   }\n  },\n  \"DatasetList\": {\n   \"id\": \"DatasetList\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"datasets\": {\n     \"type\": \"array\",\n     \"description\": \"An array of the dataset resources in the project. Each resource contains basic information. For full information about a particular dataset resource, use the Datasets: get method. This property is omitted when there are no datasets in the project.\",\n     \"items\": {\n      \"type\": \"object\",\n      \"properties\": {\n       \"datasetReference\": {\n        \"$ref\": \"DatasetReference\",\n        \"description\": \"The dataset reference. Use this property to access specific parts of the dataset's ID, such as project ID or dataset ID.\"\n       },\n       \"friendlyName\": {\n        \"type\": \"string\",\n        \"description\": \"A descriptive name for the dataset, if one exists.\"\n       },\n       \"id\": {\n        \"type\": \"string\",\n        \"description\": \"The fully-qualified, unique, opaque ID of the dataset.\"\n       },\n       \"kind\": {\n        \"type\": \"string\",\n        \"description\": \"The resource type. This property always returns the value \\\"bigquery#dataset\\\".\",\n        \"default\": \"bigquery#dataset\"\n       }\n      }\n     }\n    },\n    \"etag\": {\n     \"type\": \"string\",\n     \"description\": \"A hash value of the results page. You can use this property to determine if the page has changed since the last request.\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The list type. This property always returns the value \\\"bigquery#datasetList\\\".\",\n     \"default\": \"bigquery#datasetList\"\n    },\n    \"nextPageToken\": {\n     \"type\": \"string\",\n     \"description\": \"A token that can be used to request the next results page. 
This property is omitted on the final results page.\"\n    }\n   }\n  },\n  \"DatasetReference\": {\n   \"id\": \"DatasetReference\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"datasetId\": {\n     \"type\": \"string\",\n     \"description\": \"[Required] A unique ID for this dataset, without the project name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.\",\n     \"annotations\": {\n      \"required\": [\n       \"bigquery.datasets.update\"\n      ]\n     }\n    },\n    \"projectId\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The ID of the project containing this dataset.\",\n     \"annotations\": {\n      \"required\": [\n       \"bigquery.datasets.update\"\n      ]\n     }\n    }\n   }\n  },\n  \"ErrorProto\": {\n   \"id\": \"ErrorProto\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"debugInfo\": {\n     \"type\": \"string\",\n     \"description\": \"Debugging information. This property is internal to Google and should not be used.\"\n    },\n    \"location\": {\n     \"type\": \"string\",\n     \"description\": \"Specifies where the error occurred, if present.\"\n    },\n    \"message\": {\n     \"type\": \"string\",\n     \"description\": \"A human-readable description of the error.\"\n    },\n    \"reason\": {\n     \"type\": \"string\",\n     \"description\": \"A short error code that summarizes the error.\"\n    }\n   }\n  },\n  \"ExternalDataConfiguration\": {\n   \"id\": \"ExternalDataConfiguration\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"compression\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The compression type of the data source. Possible values include GZIP and NONE. 
The default value is NONE.\"\n    },\n    \"csvOptions\": {\n     \"$ref\": \"CsvOptions\",\n     \"description\": \"Additional properties to set if sourceFormat is set to CSV.\"\n    },\n    \"ignoreUnknownValues\": {\n     \"type\": \"boolean\",\n     \"description\": \"[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns\"\n    },\n    \"maxBadRecords\": {\n     \"type\": \"integer\",\n     \"description\": \"[Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.\",\n     \"format\": \"int32\"\n    },\n    \"schema\": {\n     \"$ref\": \"TableSchema\",\n     \"description\": \"[Required] The schema for the data.\"\n    },\n    \"sourceFormat\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The data format. External data sources must be in CSV format. The default value is CSV.\"\n    },\n    \"sourceUris\": {\n     \"type\": \"array\",\n     \"description\": \"[Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. 
CSV limits related to load jobs apply to external data sources, plus an additional limit of 10 GB maximum size across all URIs.\",\n     \"items\": {\n      \"type\": \"string\"\n     }\n    }\n   }\n  },\n  \"GetQueryResultsResponse\": {\n   \"id\": \"GetQueryResultsResponse\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"cacheHit\": {\n     \"type\": \"boolean\",\n     \"description\": \"Whether the query result was fetched from the query cache.\"\n    },\n    \"etag\": {\n     \"type\": \"string\",\n     \"description\": \"A hash of this response.\"\n    },\n    \"jobComplete\": {\n     \"type\": \"boolean\",\n     \"description\": \"Whether the query has completed or not. If rows or totalRows are present, this will always be true. If this is false, totalRows will not be available.\"\n    },\n    \"jobReference\": {\n     \"$ref\": \"JobReference\",\n     \"description\": \"Reference to the BigQuery Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults).\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The resource type of the response.\",\n     \"default\": \"bigquery#getQueryResultsResponse\"\n    },\n    \"pageToken\": {\n     \"type\": \"string\",\n     \"description\": \"A token used for paging results.\"\n    },\n    \"rows\": {\n     \"type\": \"array\",\n     \"description\": \"An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above. 
Present only when the query completes successfully.\",\n     \"items\": {\n      \"$ref\": \"TableRow\"\n     }\n    },\n    \"schema\": {\n     \"$ref\": \"TableSchema\",\n     \"description\": \"The schema of the results. Present only when the query completes successfully.\"\n    },\n    \"totalBytesProcessed\": {\n     \"type\": \"string\",\n     \"description\": \"The total number of bytes processed for this query.\",\n     \"format\": \"int64\"\n    },\n    \"totalRows\": {\n     \"type\": \"string\",\n     \"description\": \"The total number of rows in the complete query result set, which can be more than the number of rows in this single page of results. Present only when the query completes successfully.\",\n     \"format\": \"uint64\"\n    }\n   }\n  },\n  \"Job\": {\n   \"id\": \"Job\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"configuration\": {\n     \"$ref\": \"JobConfiguration\",\n     \"description\": \"[Required] Describes the job configuration.\"\n    },\n    \"etag\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] A hash of this resource.\"\n    },\n    \"id\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] Opaque ID field of the job\"\n    },\n    \"jobReference\": {\n     \"$ref\": \"JobReference\",\n     \"description\": \"[Optional] Reference describing the unique-per-user name of the job.\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] The type of the resource.\",\n     \"default\": \"bigquery#job\"\n    },\n    \"selfLink\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] A URL that can be used to access this resource again.\"\n    },\n    \"statistics\": {\n     \"$ref\": \"JobStatistics\",\n     \"description\": \"[Output-only] Information about the job, including starting time and ending time of the job.\"\n    },\n    \"status\": {\n     \"$ref\": \"JobStatus\",\n     \"description\": \"[Output-only] The status of 
this job. Examine this value when polling an asynchronous job to see if the job is complete.\"\n    },\n    \"user_email\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] Email address of the user who ran the job.\"\n    }\n   }\n  },\n  \"JobConfiguration\": {\n   \"id\": \"JobConfiguration\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"copy\": {\n     \"$ref\": \"JobConfigurationTableCopy\",\n     \"description\": \"[Pick one] Copies a table.\"\n    },\n    \"dryRun\": {\n     \"type\": \"boolean\",\n     \"description\": \"[Optional] If set, don't actually run this job. A valid query will return a mostly empty response with some processing statistics, while an invalid query will return the same error it would if it wasn't a dry run. Behavior of non-query jobs is undefined.\"\n    },\n    \"extract\": {\n     \"$ref\": \"JobConfigurationExtract\",\n     \"description\": \"[Pick one] Configures an extract job.\"\n    },\n    \"link\": {\n     \"$ref\": \"JobConfigurationLink\",\n     \"description\": \"[Pick one] Configures a link job.\"\n    },\n    \"load\": {\n     \"$ref\": \"JobConfigurationLoad\",\n     \"description\": \"[Pick one] Configures a load job.\"\n    },\n    \"query\": {\n     \"$ref\": \"JobConfigurationQuery\",\n     \"description\": \"[Pick one] Configures a query job.\"\n    }\n   }\n  },\n  \"JobConfigurationExtract\": {\n   \"id\": \"JobConfigurationExtract\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"compression\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The compression type to use for exported files. Possible values include GZIP and NONE. The default value is NONE.\"\n    },\n    \"destinationFormat\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO. The default value is CSV. 
Tables with nested or repeated fields cannot be exported as CSV.\"\n    },\n    \"destinationUri\": {\n     \"type\": \"string\",\n     \"description\": \"[Pick one] DEPRECATED: Use destinationUris instead, passing only one URI as necessary. The fully-qualified Google Cloud Storage URI where the extracted table should be written.\"\n    },\n    \"destinationUris\": {\n     \"type\": \"array\",\n     \"description\": \"[Pick one] A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.\",\n     \"items\": {\n      \"type\": \"string\"\n     }\n    },\n    \"fieldDelimiter\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] Delimiter to use between fields in the exported data. Default is ','\"\n    },\n    \"printHeader\": {\n     \"type\": \"boolean\",\n     \"description\": \"[Optional] Whether to print out a header row in the results. Default is true.\"\n    },\n    \"sourceTable\": {\n     \"$ref\": \"TableReference\",\n     \"description\": \"[Required] A reference to the table being exported.\"\n    }\n   }\n  },\n  \"JobConfigurationLink\": {\n   \"id\": \"JobConfigurationLink\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"createDisposition\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. 
Creation, truncation and append actions occur as one atomic update upon job completion.\"\n    },\n    \"destinationTable\": {\n     \"$ref\": \"TableReference\",\n     \"description\": \"[Required] The destination table of the link job.\"\n    },\n    \"sourceUri\": {\n     \"type\": \"array\",\n     \"description\": \"[Required] URI of source table to link.\",\n     \"items\": {\n      \"type\": \"string\"\n     }\n    },\n    \"writeDisposition\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion.\"\n    }\n   }\n  },\n  \"JobConfigurationLoad\": {\n   \"id\": \"JobConfigurationLoad\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"allowJaggedRows\": {\n     \"type\": \"boolean\",\n     \"description\": \"[Optional] Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.\"\n    },\n    \"allowQuotedNewlines\": {\n     \"type\": \"boolean\",\n     \"description\": \"Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. 
The default value is false.\"\n    },\n    \"createDisposition\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion.\"\n    },\n    \"destinationTable\": {\n     \"$ref\": \"TableReference\",\n     \"description\": \"[Required] The destination table to load the data into.\"\n    },\n    \"encoding\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.\"\n    },\n    \"fieldDelimiter\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \\\"\\\\t\\\" to specify a tab separator. The default value is a comma (',').\"\n    },\n    \"ignoreUnknownValues\": {\n     \"type\": \"boolean\",\n     \"description\": \"[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. 
The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names\"\n    },\n    \"maxBadRecords\": {\n     \"type\": \"integer\",\n     \"description\": \"[Optional] The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.\",\n     \"format\": \"int32\"\n    },\n    \"projectionFields\": {\n     \"type\": \"array\",\n     \"description\": \"[Experimental] If sourceFormat is set to \\\"DATASTORE_BACKUP\\\", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.\",\n     \"items\": {\n      \"type\": \"string\"\n     }\n    },\n    \"quote\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('\\\"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.\"\n    },\n    \"schema\": {\n     \"$ref\": \"TableSchema\",\n     \"description\": \"[Optional] The schema for the destination table. The schema can be omitted if the destination table already exists or if the schema can be inferred from the loaded data.\"\n    },\n    \"schemaInline\": {\n     \"type\": \"string\",\n     \"description\": \"[Deprecated] The inline schema. 
For CSV schemas, specify as \\\"Field1:Type1[,Field2:Type2]*\\\". For example, \\\"foo:STRING, bar:INTEGER, baz:FLOAT\\\".\"\n    },\n    \"schemaInlineFormat\": {\n     \"type\": \"string\",\n     \"description\": \"[Deprecated] The format of the schemaInline property.\"\n    },\n    \"skipLeadingRows\": {\n     \"type\": \"integer\",\n     \"description\": \"[Optional] The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.\",\n     \"format\": \"int32\"\n    },\n    \"sourceFormat\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The format of the data files. For CSV files, specify \\\"CSV\\\". For datastore backups, specify \\\"DATASTORE_BACKUP\\\". For newline-delimited JSON, specify \\\"NEWLINE_DELIMITED_JSON\\\". The default value is CSV.\"\n    },\n    \"sourceUris\": {\n     \"type\": \"array\",\n     \"description\": \"[Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name.\",\n     \"items\": {\n      \"type\": \"string\"\n     }\n    },\n    \"writeDisposition\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. 
Creation, truncation and append actions occur as one atomic update upon job completion.\"\n    }\n   }\n  },\n  \"JobConfigurationQuery\": {\n   \"id\": \"JobConfigurationQuery\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"allowLargeResults\": {\n     \"type\": \"boolean\",\n     \"description\": \"If true, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set.\"\n    },\n    \"createDisposition\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion.\"\n    },\n    \"defaultDataset\": {\n     \"$ref\": \"DatasetReference\",\n     \"description\": \"[Optional] Specifies the default dataset to use for unqualified table names in the query.\"\n    },\n    \"destinationTable\": {\n     \"$ref\": \"TableReference\",\n     \"description\": \"[Optional] Describes the table where the query results should be stored. If not present, a new table will be created to store the results.\"\n    },\n    \"flattenResults\": {\n     \"type\": \"boolean\",\n     \"description\": \"[Optional] Flattens all nested and repeated fields in the query results. The default value is true. allowLargeResults must be true if this is set to false.\"\n    },\n    \"preserveNulls\": {\n     \"type\": \"boolean\",\n     \"description\": \"[Deprecated] This property is deprecated.\"\n    },\n    \"priority\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] Specifies a priority for the query. Possible values include INTERACTIVE and BATCH. 
The default value is INTERACTIVE.\"\n    },\n    \"query\": {\n     \"type\": \"string\",\n     \"description\": \"[Required] BigQuery SQL query to execute.\"\n    },\n    \"tableDefinitions\": {\n     \"type\": \"object\",\n     \"description\": \"[Experimental] If querying an external data source outside of BigQuery, describes the data format, location and other properties of the data source. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.\",\n     \"additionalProperties\": {\n      \"$ref\": \"ExternalDataConfiguration\"\n     }\n    },\n    \"useQueryCache\": {\n     \"type\": \"boolean\",\n     \"description\": \"[Optional] Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified.\"\n    },\n    \"writeDisposition\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion.\"\n    }\n   }\n  },\n  \"JobConfigurationTableCopy\": {\n   \"id\": \"JobConfigurationTableCopy\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"createDisposition\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] Specifies whether the job is allowed to create new tables. 
The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion.\"\n    },\n    \"destinationTable\": {\n     \"$ref\": \"TableReference\",\n     \"description\": \"[Required] The destination table\"\n    },\n    \"sourceTable\": {\n     \"$ref\": \"TableReference\",\n     \"description\": \"[Pick one] Source table to copy.\"\n    },\n    \"sourceTables\": {\n     \"type\": \"array\",\n     \"description\": \"[Pick one] Source tables to copy.\",\n     \"items\": {\n      \"$ref\": \"TableReference\"\n     }\n    },\n    \"writeDisposition\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. 
Creation, truncation and append actions occur as one atomic update upon job completion.\"\n    }\n   }\n  },\n  \"JobList\": {\n   \"id\": \"JobList\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"etag\": {\n     \"type\": \"string\",\n     \"description\": \"A hash of this page of results.\"\n    },\n    \"jobs\": {\n     \"type\": \"array\",\n     \"description\": \"List of jobs that were requested.\",\n     \"items\": {\n      \"type\": \"object\",\n      \"properties\": {\n       \"configuration\": {\n        \"$ref\": \"JobConfiguration\",\n        \"description\": \"[Full-projection-only] Specifies the job configuration.\"\n       },\n       \"errorResult\": {\n        \"$ref\": \"ErrorProto\",\n        \"description\": \"A result object that will be present only if the job has failed.\"\n       },\n       \"id\": {\n        \"type\": \"string\",\n        \"description\": \"Unique opaque ID of the job.\"\n       },\n       \"jobReference\": {\n        \"$ref\": \"JobReference\",\n        \"description\": \"Job reference uniquely identifying the job.\"\n       },\n       \"kind\": {\n        \"type\": \"string\",\n        \"description\": \"The resource type.\",\n        \"default\": \"bigquery#job\"\n       },\n       \"state\": {\n        \"type\": \"string\",\n        \"description\": \"Running state of the job. 
When the state is DONE, errorResult can be checked to determine whether the job succeeded or failed.\"\n       },\n       \"statistics\": {\n        \"$ref\": \"JobStatistics\",\n        \"description\": \"[Output-only] Information about the job, including starting time and ending time of the job.\"\n       },\n       \"status\": {\n        \"$ref\": \"JobStatus\",\n        \"description\": \"[Full-projection-only] Describes the state of the job.\"\n       },\n       \"user_email\": {\n        \"type\": \"string\",\n        \"description\": \"[Full-projection-only] Email address of the user who ran the job.\"\n       }\n      }\n     }\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The resource type of the response.\",\n     \"default\": \"bigquery#jobList\"\n    },\n    \"nextPageToken\": {\n     \"type\": \"string\",\n     \"description\": \"A token to request the next page of results.\"\n    },\n    \"totalItems\": {\n     \"type\": \"integer\",\n     \"description\": \"Total number of jobs in this collection.\",\n     \"format\": \"int32\"\n    }\n   }\n  },\n  \"JobReference\": {\n   \"id\": \"JobReference\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"jobId\": {\n     \"type\": \"string\",\n     \"description\": \"[Required] The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). 
The maximum length is 1,024 characters.\",\n     \"annotations\": {\n      \"required\": [\n       \"bigquery.jobs.getQueryResults\"\n      ]\n     }\n    },\n    \"projectId\": {\n     \"type\": \"string\",\n     \"description\": \"[Required] The ID of the project containing this job.\",\n     \"annotations\": {\n      \"required\": [\n       \"bigquery.jobs.getQueryResults\"\n      ]\n     }\n    }\n   }\n  },\n  \"JobStatistics\": {\n   \"id\": \"JobStatistics\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"creationTime\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] Creation time of this job, in milliseconds since the epoch. This field will be present on all jobs.\",\n     \"format\": \"int64\"\n    },\n    \"endTime\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] End time of this job, in milliseconds since the epoch. This field will be present whenever a job is in the DONE state.\",\n     \"format\": \"int64\"\n    },\n    \"extract\": {\n     \"$ref\": \"JobStatistics4\",\n     \"description\": \"[Output-only] Statistics for an extract job.\"\n    },\n    \"load\": {\n     \"$ref\": \"JobStatistics3\",\n     \"description\": \"[Output-only] Statistics for a load job.\"\n    },\n    \"query\": {\n     \"$ref\": \"JobStatistics2\",\n     \"description\": \"[Output-only] Statistics for a query job.\"\n    },\n    \"startTime\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] Start time of this job, in milliseconds since the epoch. 
This field will be present when the job transitions from the PENDING state to either RUNNING or DONE.\",\n     \"format\": \"int64\"\n    },\n    \"totalBytesProcessed\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] [Deprecated] Use the bytes processed in the query statistics instead.\",\n     \"format\": \"int64\"\n    }\n   }\n  },\n  \"JobStatistics2\": {\n   \"id\": \"JobStatistics2\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"cacheHit\": {\n     \"type\": \"boolean\",\n     \"description\": \"[Output-only] Whether the query result was fetched from the query cache.\"\n    },\n    \"totalBytesProcessed\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] Total bytes processed for this job.\",\n     \"format\": \"int64\"\n    }\n   }\n  },\n  \"JobStatistics3\": {\n   \"id\": \"JobStatistics3\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"inputFileBytes\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] Number of bytes of source data in a joad job.\",\n     \"format\": \"int64\"\n    },\n    \"inputFiles\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] Number of source files in a load job.\",\n     \"format\": \"int64\"\n    },\n    \"outputBytes\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] Size of the loaded data in bytes. Note that while an import job is in the running state, this value may change.\",\n     \"format\": \"int64\"\n    },\n    \"outputRows\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] Number of rows imported in a load job. 
Note that while an import job is in the running state, this value may change.\",\n     \"format\": \"int64\"\n    }\n   }\n  },\n  \"JobStatistics4\": {\n   \"id\": \"JobStatistics4\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"destinationUriFileCounts\": {\n     \"type\": \"array\",\n     \"description\": \"[Experimental] Number of files per destination URI or URI pattern specified in the extract configuration. These values will be in the same order as the URIs specified in the 'destinationUris' field.\",\n     \"items\": {\n      \"type\": \"string\",\n      \"format\": \"int64\"\n     }\n    }\n   }\n  },\n  \"JobStatus\": {\n   \"id\": \"JobStatus\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"errorResult\": {\n     \"$ref\": \"ErrorProto\",\n     \"description\": \"[Output-only] Final error result of the job. If present, indicates that the job has completed and was unsuccessful.\"\n    },\n    \"errors\": {\n     \"type\": \"array\",\n     \"description\": \"[Output-only] All errors encountered during the running of the job. 
Errors here do not necessarily mean that the job has completed or was unsuccessful.\",\n     \"items\": {\n      \"$ref\": \"ErrorProto\"\n     }\n    },\n    \"state\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] Running state of the job.\"\n    }\n   }\n  },\n  \"JsonObject\": {\n   \"id\": \"JsonObject\",\n   \"type\": \"object\",\n   \"description\": \"Represents a single JSON object.\",\n   \"additionalProperties\": {\n    \"$ref\": \"JsonValue\"\n   }\n  },\n  \"JsonValue\": {\n   \"id\": \"JsonValue\",\n   \"type\": \"any\"\n  },\n  \"ProjectList\": {\n   \"id\": \"ProjectList\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"etag\": {\n     \"type\": \"string\",\n     \"description\": \"A hash of the page of results\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The type of list.\",\n     \"default\": \"bigquery#projectList\"\n    },\n    \"nextPageToken\": {\n     \"type\": \"string\",\n     \"description\": \"A token to request the next page of results.\"\n    },\n    \"projects\": {\n     \"type\": \"array\",\n     \"description\": \"Projects to which you have at least READ access.\",\n     \"items\": {\n      \"type\": \"object\",\n      \"properties\": {\n       \"friendlyName\": {\n        \"type\": \"string\",\n        \"description\": \"A descriptive name for this project.\"\n       },\n       \"id\": {\n        \"type\": \"string\",\n        \"description\": \"An opaque ID of this project.\"\n       },\n       \"kind\": {\n        \"type\": \"string\",\n        \"description\": \"The resource type.\",\n        \"default\": \"bigquery#project\"\n       },\n       \"numericId\": {\n        \"type\": \"string\",\n        \"description\": \"The numeric ID of this project.\",\n        \"format\": \"uint64\"\n       },\n       \"projectReference\": {\n        \"$ref\": \"ProjectReference\",\n        \"description\": \"A unique reference to this project.\"\n       }\n      }\n     }\n    
},\n    \"totalItems\": {\n     \"type\": \"integer\",\n     \"description\": \"The total number of projects in the list.\",\n     \"format\": \"int32\"\n    }\n   }\n  },\n  \"ProjectReference\": {\n   \"id\": \"ProjectReference\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"projectId\": {\n     \"type\": \"string\",\n     \"description\": \"[Required] ID of the project. Can be either the numeric ID or the assigned ID of the project.\"\n    }\n   }\n  },\n  \"QueryRequest\": {\n   \"id\": \"QueryRequest\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"defaultDataset\": {\n     \"$ref\": \"DatasetReference\",\n     \"description\": \"[Optional] Specifies the default datasetId and projectId to assume for any unqualified table names in the query. If not set, all table names in the query string must be qualified in the format 'datasetId.tableId'.\"\n    },\n    \"dryRun\": {\n     \"type\": \"boolean\",\n     \"description\": \"[Optional] If set, don't actually run this job. A valid query will return a mostly empty response with some processing statistics, while an invalid query will return the same error it would if it wasn't a dry run.\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The resource type of the request.\",\n     \"default\": \"bigquery#queryRequest\"\n    },\n    \"maxResults\": {\n     \"type\": \"integer\",\n     \"description\": \"[Optional] The maximum number of rows of data to return per page of results. Setting this flag to a small value such as 1000 and then paging through results might improve reliability when the query result set is large. In addition to this limit, responses are also limited to 10 MB. 
By default, there is no maximum row count, and only the byte limit applies.\",\n     \"format\": \"uint32\"\n    },\n    \"preserveNulls\": {\n     \"type\": \"boolean\",\n     \"description\": \"[Deprecated] This property is deprecated.\"\n    },\n    \"query\": {\n     \"type\": \"string\",\n     \"description\": \"[Required] A query string, following the BigQuery query syntax, of the query to execute. Example: \\\"SELECT count(f1) FROM [myProjectId:myDatasetId.myTableId]\\\".\",\n     \"annotations\": {\n      \"required\": [\n       \"bigquery.jobs.query\"\n      ]\n     }\n    },\n    \"timeoutMs\": {\n     \"type\": \"integer\",\n     \"description\": \"[Optional] How long to wait for the query to complete, in milliseconds, before the request times out and returns. Note that this is only a timeout for the request, not the query. If the query takes longer to run than the timeout value, the call returns without any results and with the 'jobComplete' flag set to false. You can call GetQueryResults() to wait for the query to complete and read the results. The default value is 10000 milliseconds (10 seconds).\",\n     \"format\": \"uint32\"\n    },\n    \"useQueryCache\": {\n     \"type\": \"boolean\",\n     \"description\": \"[Optional] Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. The default value is true.\"\n    }\n   }\n  },\n  \"QueryResponse\": {\n   \"id\": \"QueryResponse\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"cacheHit\": {\n     \"type\": \"boolean\",\n     \"description\": \"Whether the query result was fetched from the query cache.\"\n    },\n    \"jobComplete\": {\n     \"type\": \"boolean\",\n     \"description\": \"Whether the query has completed or not. If rows or totalRows are present, this will always be true. 
If this is false, totalRows will not be available.\"\n    },\n    \"jobReference\": {\n     \"$ref\": \"JobReference\",\n     \"description\": \"Reference to the Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults).\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The resource type.\",\n     \"default\": \"bigquery#queryResponse\"\n    },\n    \"pageToken\": {\n     \"type\": \"string\",\n     \"description\": \"A token used for paging results.\"\n    },\n    \"rows\": {\n     \"type\": \"array\",\n     \"description\": \"An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above.\",\n     \"items\": {\n      \"$ref\": \"TableRow\"\n     }\n    },\n    \"schema\": {\n     \"$ref\": \"TableSchema\",\n     \"description\": \"The schema of the results. Present only when the query completes successfully.\"\n    },\n    \"totalBytesProcessed\": {\n     \"type\": \"string\",\n     \"description\": \"The total number of bytes processed for this query. 
If this query was a dry run, this is the number of bytes that would be processed if the query were run.\",\n     \"format\": \"int64\"\n    },\n    \"totalRows\": {\n     \"type\": \"string\",\n     \"description\": \"The total number of rows in the complete query result set, which can be more than the number of rows in this single page of results.\",\n     \"format\": \"uint64\"\n    }\n   }\n  },\n  \"Table\": {\n   \"id\": \"Table\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"creationTime\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] The time when this table was created, in milliseconds since the epoch.\",\n     \"format\": \"int64\"\n    },\n    \"description\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] A user-friendly description of this table.\"\n    },\n    \"etag\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] A hash of this resource.\"\n    },\n    \"expirationTime\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. 
Expired tables will be deleted and their storage reclaimed.\",\n     \"format\": \"int64\"\n    },\n    \"friendlyName\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] A descriptive name for this table.\"\n    },\n    \"id\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] An opaque ID uniquely identifying the table.\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] The type of the resource.\",\n     \"default\": \"bigquery#table\"\n    },\n    \"lastModifiedTime\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] The time when this table was last modified, in milliseconds since the epoch.\",\n     \"format\": \"uint64\"\n    },\n    \"numBytes\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] The size of the table in bytes. This property is unavailable for tables that are actively receiving streaming inserts.\",\n     \"format\": \"int64\"\n    },\n    \"numRows\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] The number of rows of data in this table. This property is unavailable for tables that are actively receiving streaming inserts.\",\n     \"format\": \"uint64\"\n    },\n    \"schema\": {\n     \"$ref\": \"TableSchema\",\n     \"description\": \"[Optional] Describes the schema of this table.\"\n    },\n    \"selfLink\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] A URL that can be used to access this resource again.\"\n    },\n    \"tableReference\": {\n     \"$ref\": \"TableReference\",\n     \"description\": \"[Required] Reference describing the ID of this table.\"\n    },\n    \"type\": {\n     \"type\": \"string\",\n     \"description\": \"[Output-only] Describes the table type. The following values are supported: TABLE: A normal BigQuery table. VIEW: A virtual table defined by a SQL query. 
The default value is TABLE.\"\n    },\n    \"view\": {\n     \"$ref\": \"ViewDefinition\",\n     \"description\": \"[Optional] The view definition.\"\n    }\n   }\n  },\n  \"TableCell\": {\n   \"id\": \"TableCell\",\n   \"type\": \"object\",\n   \"description\": \"Represents a single cell in the result set. Users of the java client can detect whether their value result is null by calling 'com.google.api.client.util.Data.isNull(cell.getV())'.\",\n   \"properties\": {\n    \"v\": {\n     \"type\": \"any\"\n    }\n   }\n  },\n  \"TableDataInsertAllRequest\": {\n   \"id\": \"TableDataInsertAllRequest\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"ignoreUnknownValues\": {\n     \"type\": \"boolean\",\n     \"description\": \"[Optional] Accept rows that contain values that do not match the schema. The unknown values are ignored. Default is false, which treats unknown values as errors.\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The resource type of the response.\",\n     \"default\": \"bigquery#tableDataInsertAllRequest\"\n    },\n    \"rows\": {\n     \"type\": \"array\",\n     \"description\": \"The rows to insert.\",\n     \"items\": {\n      \"type\": \"object\",\n      \"properties\": {\n       \"insertId\": {\n        \"type\": \"string\",\n        \"description\": \"[Optional] A unique ID for each row. BigQuery uses this property to detect duplicate insertion requests on a best-effort basis.\"\n       },\n       \"json\": {\n        \"$ref\": \"JsonObject\",\n        \"description\": \"[Required] A JSON object that contains a row of data. The object's properties and values must match the destination table's schema.\"\n       }\n      }\n     }\n    },\n    \"skipInvalidRows\": {\n     \"type\": \"boolean\",\n     \"description\": \"[Optional] Insert all valid rows of a request, even if invalid rows exist. 
The default value is false, which causes the entire request to fail if any invalid rows exist.\"\n    }\n   }\n  },\n  \"TableDataInsertAllResponse\": {\n   \"id\": \"TableDataInsertAllResponse\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"insertErrors\": {\n     \"type\": \"array\",\n     \"description\": \"An array of errors for rows that were not inserted.\",\n     \"items\": {\n      \"type\": \"object\",\n      \"properties\": {\n       \"errors\": {\n        \"type\": \"array\",\n        \"description\": \"Error information for the row indicated by the index property.\",\n        \"items\": {\n         \"$ref\": \"ErrorProto\"\n        }\n       },\n       \"index\": {\n        \"type\": \"integer\",\n        \"description\": \"The index of the row that error applies to.\",\n        \"format\": \"uint32\"\n       }\n      }\n     }\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The resource type of the response.\",\n     \"default\": \"bigquery#tableDataInsertAllResponse\"\n    }\n   }\n  },\n  \"TableDataList\": {\n   \"id\": \"TableDataList\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"etag\": {\n     \"type\": \"string\",\n     \"description\": \"A hash of this page of results.\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The resource type of the response.\",\n     \"default\": \"bigquery#tableDataList\"\n    },\n    \"pageToken\": {\n     \"type\": \"string\",\n     \"description\": \"A token used for paging results. 
Providing this token instead of the startIndex parameter can help you retrieve stable results when an underlying table is changing.\"\n    },\n    \"rows\": {\n     \"type\": \"array\",\n     \"description\": \"Rows of results.\",\n     \"items\": {\n      \"$ref\": \"TableRow\"\n     }\n    },\n    \"totalRows\": {\n     \"type\": \"string\",\n     \"description\": \"The total number of rows in the complete table.\",\n     \"format\": \"int64\"\n    }\n   }\n  },\n  \"TableFieldSchema\": {\n   \"id\": \"TableFieldSchema\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"description\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The field description. The maximum length is 16K characters.\"\n    },\n    \"fields\": {\n     \"type\": \"array\",\n     \"description\": \"[Optional] Describes the nested schema fields if the type property is set to RECORD.\",\n     \"items\": {\n      \"$ref\": \"TableFieldSchema\"\n     }\n    },\n    \"mode\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.\"\n    },\n    \"name\": {\n     \"type\": \"string\",\n     \"description\": \"[Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 128 characters.\"\n    },\n    \"type\": {\n     \"type\": \"string\",\n     \"description\": \"[Required] The field data type. 
Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates that the field contains a nested schema).\"\n    }\n   }\n  },\n  \"TableList\": {\n   \"id\": \"TableList\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"etag\": {\n     \"type\": \"string\",\n     \"description\": \"A hash of this page of results.\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The type of list.\",\n     \"default\": \"bigquery#tableList\"\n    },\n    \"nextPageToken\": {\n     \"type\": \"string\",\n     \"description\": \"A token to request the next page of results.\"\n    },\n    \"tables\": {\n     \"type\": \"array\",\n     \"description\": \"Tables in the requested dataset.\",\n     \"items\": {\n      \"type\": \"object\",\n      \"properties\": {\n       \"friendlyName\": {\n        \"type\": \"string\",\n        \"description\": \"The user-friendly name for this table.\"\n       },\n       \"id\": {\n        \"type\": \"string\",\n        \"description\": \"An opaque ID of the table\"\n       },\n       \"kind\": {\n        \"type\": \"string\",\n        \"description\": \"The resource type.\",\n        \"default\": \"bigquery#table\"\n       },\n       \"tableReference\": {\n        \"$ref\": \"TableReference\",\n        \"description\": \"A reference uniquely identifying the table.\"\n       },\n       \"type\": {\n        \"type\": \"string\",\n        \"description\": \"The type of table. 
Possible values are: TABLE, VIEW.\"\n       }\n      }\n     }\n    },\n    \"totalItems\": {\n     \"type\": \"integer\",\n     \"description\": \"The total number of tables in the dataset.\",\n     \"format\": \"int32\"\n    }\n   }\n  },\n  \"TableReference\": {\n   \"id\": \"TableReference\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"datasetId\": {\n     \"type\": \"string\",\n     \"description\": \"[Required] The ID of the dataset containing this table.\",\n     \"annotations\": {\n      \"required\": [\n       \"bigquery.tables.update\"\n      ]\n     }\n    },\n    \"projectId\": {\n     \"type\": \"string\",\n     \"description\": \"[Required] The ID of the project containing this table.\",\n     \"annotations\": {\n      \"required\": [\n       \"bigquery.tables.update\"\n      ]\n     }\n    },\n    \"tableId\": {\n     \"type\": \"string\",\n     \"description\": \"[Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). 
The maximum length is 1,024 characters.\",\n     \"annotations\": {\n      \"required\": [\n       \"bigquery.tables.update\"\n      ]\n     }\n    }\n   }\n  },\n  \"TableRow\": {\n   \"id\": \"TableRow\",\n   \"type\": \"object\",\n   \"description\": \"Represents a single row in the result set, consisting of one or more fields.\",\n   \"properties\": {\n    \"f\": {\n     \"type\": \"array\",\n     \"items\": {\n      \"$ref\": \"TableCell\"\n     }\n    }\n   }\n  },\n  \"TableSchema\": {\n   \"id\": \"TableSchema\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"fields\": {\n     \"type\": \"array\",\n     \"description\": \"Describes the fields in a table.\",\n     \"items\": {\n      \"$ref\": \"TableFieldSchema\"\n     }\n    }\n   }\n  },\n  \"ViewDefinition\": {\n   \"id\": \"ViewDefinition\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"query\": {\n     \"type\": \"string\",\n     \"description\": \"[Required] A query that BigQuery executes when the view is referenced.\"\n    }\n   }\n  }\n },\n \"resources\": {\n  \"datasets\": {\n   \"methods\": {\n    \"delete\": {\n     \"id\": \"bigquery.datasets.delete\",\n     \"path\": \"projects/{projectId}/datasets/{datasetId}\",\n     \"httpMethod\": \"DELETE\",\n     \"description\": \"Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. Immediately after deletion, you can create another dataset with the same name.\",\n     \"parameters\": {\n      \"datasetId\": {\n       \"type\": \"string\",\n       \"description\": \"Dataset ID of dataset being deleted\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"deleteContents\": {\n       \"type\": \"boolean\",\n       \"description\": \"If True, delete all the tables in the dataset. If False and the dataset contains tables, the request will fail. 
Default is False\",\n       \"location\": \"query\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the dataset being deleted\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\",\n      \"datasetId\"\n     ],\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    },\n    \"get\": {\n     \"id\": \"bigquery.datasets.get\",\n     \"path\": \"projects/{projectId}/datasets/{datasetId}\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Returns the dataset specified by datasetID.\",\n     \"parameters\": {\n      \"datasetId\": {\n       \"type\": \"string\",\n       \"description\": \"Dataset ID of the requested dataset\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the requested dataset\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\",\n      \"datasetId\"\n     ],\n     \"response\": {\n      \"$ref\": \"Dataset\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    },\n    \"insert\": {\n     \"id\": \"bigquery.datasets.insert\",\n     \"path\": \"projects/{projectId}/datasets\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Creates a new empty dataset.\",\n     \"parameters\": {\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the new dataset\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\"\n     ],\n     \"request\": {\n      \"$ref\": \"Dataset\"\n     },\n     \"response\": {\n      \"$ref\": \"Dataset\"\n   
  },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    },\n    \"list\": {\n     \"id\": \"bigquery.datasets.list\",\n     \"path\": \"projects/{projectId}/datasets\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Lists all datasets in the specified project to which you have been granted the READER dataset role.\",\n     \"parameters\": {\n      \"all\": {\n       \"type\": \"boolean\",\n       \"description\": \"Whether to list all datasets, including hidden ones\",\n       \"location\": \"query\"\n      },\n      \"maxResults\": {\n       \"type\": \"integer\",\n       \"description\": \"The maximum number of results to return\",\n       \"format\": \"uint32\",\n       \"location\": \"query\"\n      },\n      \"pageToken\": {\n       \"type\": \"string\",\n       \"description\": \"Page token, returned by a previous call, to request the next page of results\",\n       \"location\": \"query\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the datasets to be listed\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\"\n     ],\n     \"response\": {\n      \"$ref\": \"DatasetList\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    },\n    \"patch\": {\n     \"id\": \"bigquery.datasets.patch\",\n     \"path\": \"projects/{projectId}/datasets/{datasetId}\",\n     \"httpMethod\": \"PATCH\",\n     \"description\": \"Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource. 
This method supports patch semantics.\",\n     \"parameters\": {\n      \"datasetId\": {\n       \"type\": \"string\",\n       \"description\": \"Dataset ID of the dataset being updated\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the dataset being updated\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\",\n      \"datasetId\"\n     ],\n     \"request\": {\n      \"$ref\": \"Dataset\"\n     },\n     \"response\": {\n      \"$ref\": \"Dataset\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    },\n    \"update\": {\n     \"id\": \"bigquery.datasets.update\",\n     \"path\": \"projects/{projectId}/datasets/{datasetId}\",\n     \"httpMethod\": \"PUT\",\n     \"description\": \"Updates information in an existing dataset. 
The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.\",\n     \"parameters\": {\n      \"datasetId\": {\n       \"type\": \"string\",\n       \"description\": \"Dataset ID of the dataset being updated\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the dataset being updated\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\",\n      \"datasetId\"\n     ],\n     \"request\": {\n      \"$ref\": \"Dataset\"\n     },\n     \"response\": {\n      \"$ref\": \"Dataset\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    }\n   }\n  },\n  \"jobs\": {\n   \"methods\": {\n    \"get\": {\n     \"id\": \"bigquery.jobs.get\",\n     \"path\": \"projects/{projectId}/jobs/{jobId}\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Returns information about a specific job. Job information is available for a six month period after creation. 
Requires that you're the person who ran the job, or have the Is Owner project role.\",\n     \"parameters\": {\n      \"jobId\": {\n       \"type\": \"string\",\n       \"description\": \"Job ID of the requested job\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the requested job\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\",\n      \"jobId\"\n     ],\n     \"response\": {\n      \"$ref\": \"Job\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    },\n    \"getQueryResults\": {\n     \"id\": \"bigquery.jobs.getQueryResults\",\n     \"path\": \"projects/{projectId}/queries/{jobId}\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Retrieves the results of a query job.\",\n     \"parameters\": {\n      \"jobId\": {\n       \"type\": \"string\",\n       \"description\": \"Job ID of the query job\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"maxResults\": {\n       \"type\": \"integer\",\n       \"description\": \"Maximum number of results to read\",\n       \"format\": \"uint32\",\n       \"location\": \"query\"\n      },\n      \"pageToken\": {\n       \"type\": \"string\",\n       \"description\": \"Page token, returned by a previous call, to request the next page of results\",\n       \"location\": \"query\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the query job\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"startIndex\": {\n       \"type\": \"string\",\n       \"description\": \"Zero-based index of the starting row\",\n       \"format\": \"uint64\",\n       \"location\": \"query\"\n      },\n      \"timeoutMs\": {\n       
\"type\": \"integer\",\n       \"description\": \"How long to wait for the query to complete, in milliseconds, before returning. Default is to return immediately. If the timeout passes before the job completes, the request will fail with a TIMEOUT error\",\n       \"format\": \"uint32\",\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\",\n      \"jobId\"\n     ],\n     \"response\": {\n      \"$ref\": \"GetQueryResultsResponse\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    },\n    \"insert\": {\n     \"id\": \"bigquery.jobs.insert\",\n     \"path\": \"projects/{projectId}/jobs\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Starts a new asynchronous job. Requires the Can View project role.\",\n     \"parameters\": {\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the project that will be billed for the job\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\"\n     ],\n     \"request\": {\n      \"$ref\": \"Job\"\n     },\n     \"response\": {\n      \"$ref\": \"Job\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_only\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ],\n     \"supportsMediaUpload\": true,\n     \"mediaUpload\": {\n      \"accept\": [\n       \"*/*\"\n      ],\n      \"protocols\": {\n       \"simple\": {\n        \"multipart\": true,\n        \"path\": \"/upload/bigquery/v2/projects/{projectId}/jobs\"\n       },\n       \"resumable\": {\n        \"multipart\": true,\n        \"path\": 
\"/resumable/upload/bigquery/v2/projects/{projectId}/jobs\"\n       }\n      }\n     }\n    },\n    \"list\": {\n     \"id\": \"bigquery.jobs.list\",\n     \"path\": \"projects/{projectId}/jobs\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Lists all jobs that you started in the specified project. The job list returns in reverse chronological order of when the jobs were created, starting with the most recent job created. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.\",\n     \"parameters\": {\n      \"allUsers\": {\n       \"type\": \"boolean\",\n       \"description\": \"Whether to display jobs owned by all users in the project. Default false\",\n       \"location\": \"query\"\n      },\n      \"maxResults\": {\n       \"type\": \"integer\",\n       \"description\": \"Maximum number of results to return\",\n       \"format\": \"uint32\",\n       \"location\": \"query\"\n      },\n      \"pageToken\": {\n       \"type\": \"string\",\n       \"description\": \"Page token, returned by a previous call, to request the next page of results\",\n       \"location\": \"query\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the jobs to list\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"projection\": {\n       \"type\": \"string\",\n       \"description\": \"Restrict information returned to a set of selected fields\",\n       \"enum\": [\n        \"full\",\n        \"minimal\"\n       ],\n       \"enumDescriptions\": [\n        \"Includes all job data\",\n        \"Does not include the job configuration\"\n       ],\n       \"location\": \"query\"\n      },\n      \"stateFilter\": {\n       \"type\": \"string\",\n       \"description\": \"Filter for job state\",\n       \"enum\": [\n        \"done\",\n        \"pending\",\n        \"running\"\n       ],\n       \"enumDescriptions\": [\n        \"Finished jobs\",\n       
 \"Pending jobs\",\n        \"Running jobs\"\n       ],\n       \"repeated\": true,\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\"\n     ],\n     \"response\": {\n      \"$ref\": \"JobList\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    },\n    \"query\": {\n     \"id\": \"bigquery.jobs.query\",\n     \"path\": \"projects/{projectId}/queries\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.\",\n     \"parameters\": {\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the project billed for the query\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\"\n     ],\n     \"request\": {\n      \"$ref\": \"QueryRequest\"\n     },\n     \"response\": {\n      \"$ref\": \"QueryResponse\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    }\n   }\n  },\n  \"projects\": {\n   \"methods\": {\n    \"list\": {\n     \"id\": \"bigquery.projects.list\",\n     \"path\": \"projects\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Lists all projects to which you have been granted any project role.\",\n     \"parameters\": {\n      \"maxResults\": {\n       \"type\": \"integer\",\n       \"description\": \"Maximum number of results to return\",\n       \"format\": \"uint32\",\n       \"location\": \"query\"\n      },\n      \"pageToken\": {\n       \"type\": \"string\",\n       \"description\": \"Page token, returned by a previous call, to request the next page of results\",\n       \"location\": \"query\"\n      }\n     },\n     \"response\": {\n      \"$ref\": 
\"ProjectList\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    }\n   }\n  },\n  \"tabledata\": {\n   \"methods\": {\n    \"insertAll\": {\n     \"id\": \"bigquery.tabledata.insertAll\",\n     \"path\": \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Streams data into BigQuery one record at a time without needing to run a load job. Requires the WRITER dataset role.\",\n     \"parameters\": {\n      \"datasetId\": {\n       \"type\": \"string\",\n       \"description\": \"Dataset ID of the destination table.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the destination table.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"tableId\": {\n       \"type\": \"string\",\n       \"description\": \"Table ID of the destination table.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\",\n      \"datasetId\",\n      \"tableId\"\n     ],\n     \"request\": {\n      \"$ref\": \"TableDataInsertAllRequest\"\n     },\n     \"response\": {\n      \"$ref\": \"TableDataInsertAllResponse\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/bigquery.insertdata\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    },\n    \"list\": {\n     \"id\": \"bigquery.tabledata.list\",\n     \"path\": \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Retrieves table data from a specified set of rows. 
Requires the READER dataset role.\",\n     \"parameters\": {\n      \"datasetId\": {\n       \"type\": \"string\",\n       \"description\": \"Dataset ID of the table to read\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"maxResults\": {\n       \"type\": \"integer\",\n       \"description\": \"Maximum number of results to return\",\n       \"format\": \"uint32\",\n       \"location\": \"query\"\n      },\n      \"pageToken\": {\n       \"type\": \"string\",\n       \"description\": \"Page token, returned by a previous call, identifying the result set\",\n       \"location\": \"query\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the table to read\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"startIndex\": {\n       \"type\": \"string\",\n       \"description\": \"Zero-based index of the starting row to read\",\n       \"format\": \"uint64\",\n       \"location\": \"query\"\n      },\n      \"tableId\": {\n       \"type\": \"string\",\n       \"description\": \"Table ID of the table to read\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\",\n      \"datasetId\",\n      \"tableId\"\n     ],\n     \"response\": {\n      \"$ref\": \"TableDataList\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    }\n   }\n  },\n  \"tables\": {\n   \"methods\": {\n    \"delete\": {\n     \"id\": \"bigquery.tables.delete\",\n     \"path\": \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}\",\n     \"httpMethod\": \"DELETE\",\n     \"description\": \"Deletes the table specified by tableId from the dataset. 
If the table contains data, all the data will be deleted.\",\n     \"parameters\": {\n      \"datasetId\": {\n       \"type\": \"string\",\n       \"description\": \"Dataset ID of the table to delete\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the table to delete\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"tableId\": {\n       \"type\": \"string\",\n       \"description\": \"Table ID of the table to delete\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\",\n      \"datasetId\",\n      \"tableId\"\n     ],\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    },\n    \"get\": {\n     \"id\": \"bigquery.tables.get\",\n     \"path\": \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Gets the specified table resource by table ID. 
This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.\",\n     \"parameters\": {\n      \"datasetId\": {\n       \"type\": \"string\",\n       \"description\": \"Dataset ID of the requested table\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the requested table\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"tableId\": {\n       \"type\": \"string\",\n       \"description\": \"Table ID of the requested table\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\",\n      \"datasetId\",\n      \"tableId\"\n     ],\n     \"response\": {\n      \"$ref\": \"Table\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    },\n    \"insert\": {\n     \"id\": \"bigquery.tables.insert\",\n     \"path\": \"projects/{projectId}/datasets/{datasetId}/tables\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Creates a new, empty table in the dataset.\",\n     \"parameters\": {\n      \"datasetId\": {\n       \"type\": \"string\",\n       \"description\": \"Dataset ID of the new table\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the new table\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\",\n      \"datasetId\"\n     ],\n     \"request\": {\n      \"$ref\": \"Table\"\n     },\n     \"response\": {\n      \"$ref\": \"Table\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    
},\n    \"list\": {\n     \"id\": \"bigquery.tables.list\",\n     \"path\": \"projects/{projectId}/datasets/{datasetId}/tables\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Lists all tables in the specified dataset. Requires the READER dataset role.\",\n     \"parameters\": {\n      \"datasetId\": {\n       \"type\": \"string\",\n       \"description\": \"Dataset ID of the tables to list\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"maxResults\": {\n       \"type\": \"integer\",\n       \"description\": \"Maximum number of results to return\",\n       \"format\": \"uint32\",\n       \"location\": \"query\"\n      },\n      \"pageToken\": {\n       \"type\": \"string\",\n       \"description\": \"Page token, returned by a previous call, to request the next page of results\",\n       \"location\": \"query\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the tables to list\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\",\n      \"datasetId\"\n     ],\n     \"response\": {\n      \"$ref\": \"TableList\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    },\n    \"patch\": {\n     \"id\": \"bigquery.tables.patch\",\n     \"path\": \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}\",\n     \"httpMethod\": \"PATCH\",\n     \"description\": \"Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource. 
This method supports patch semantics.\",\n     \"parameters\": {\n      \"datasetId\": {\n       \"type\": \"string\",\n       \"description\": \"Dataset ID of the table to update\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the table to update\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"tableId\": {\n       \"type\": \"string\",\n       \"description\": \"Table ID of the table to update\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\",\n      \"datasetId\",\n      \"tableId\"\n     ],\n     \"request\": {\n      \"$ref\": \"Table\"\n     },\n     \"response\": {\n      \"$ref\": \"Table\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    },\n    \"update\": {\n     \"id\": \"bigquery.tables.update\",\n     \"path\": \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}\",\n     \"httpMethod\": \"PUT\",\n     \"description\": \"Updates information in an existing table. 
The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource.\",\n     \"parameters\": {\n      \"datasetId\": {\n       \"type\": \"string\",\n       \"description\": \"Dataset ID of the table to update\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"projectId\": {\n       \"type\": \"string\",\n       \"description\": \"Project ID of the table to update\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"tableId\": {\n       \"type\": \"string\",\n       \"description\": \"Table ID of the table to update\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"projectId\",\n      \"datasetId\",\n      \"tableId\"\n     ],\n     \"request\": {\n      \"$ref\": \"Table\"\n     },\n     \"response\": {\n      \"$ref\": \"Table\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/bigquery\",\n      \"https://www.googleapis.com/auth/cloud-platform\"\n     ]\n    }\n   }\n  }\n }\n}\n"
  },
  {
    "path": "vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go",
    "content": "// Package bigquery provides access to the BigQuery API.\n//\n// See https://cloud.google.com/bigquery/\n//\n// Usage example:\n//\n//   import \"google.golang.org/api/bigquery/v2\"\n//   ...\n//   bigqueryService, err := bigquery.New(oauthHttpClient)\npackage bigquery\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/api/googleapi\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// Always reference these packages, just in case the auto-generated code\n// below doesn't.\nvar _ = bytes.NewBuffer\nvar _ = strconv.Itoa\nvar _ = fmt.Sprintf\nvar _ = json.NewDecoder\nvar _ = io.Copy\nvar _ = url.Parse\nvar _ = googleapi.Version\nvar _ = errors.New\nvar _ = strings.Replace\nvar _ = context.Background\n\nconst apiId = \"bigquery:v2\"\nconst apiName = \"bigquery\"\nconst apiVersion = \"v2\"\nconst basePath = \"https://www.googleapis.com/bigquery/v2/\"\n\n// OAuth2 scopes used by this API.\nconst (\n\t// View and manage your data in Google BigQuery\n\tBigqueryScope = \"https://www.googleapis.com/auth/bigquery\"\n\n\t// Insert data into Google BigQuery\n\tBigqueryInsertdataScope = \"https://www.googleapis.com/auth/bigquery.insertdata\"\n\n\t// View and manage your data across Google Cloud Platform services\n\tCloudPlatformScope = \"https://www.googleapis.com/auth/cloud-platform\"\n\n\t// Manage your data and permissions in Google Cloud Storage\n\tDevstorageFullControlScope = \"https://www.googleapis.com/auth/devstorage.full_control\"\n\n\t// View your data in Google Cloud Storage\n\tDevstorageReadOnlyScope = \"https://www.googleapis.com/auth/devstorage.read_only\"\n\n\t// Manage your data in Google Cloud Storage\n\tDevstorageReadWriteScope = \"https://www.googleapis.com/auth/devstorage.read_write\"\n)\n\nfunc New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: 
client, BasePath: basePath}\n\ts.Datasets = NewDatasetsService(s)\n\ts.Jobs = NewJobsService(s)\n\ts.Projects = NewProjectsService(s)\n\ts.Tabledata = NewTabledataService(s)\n\ts.Tables = NewTablesService(s)\n\treturn s, nil\n}\n\ntype Service struct {\n\tclient    *http.Client\n\tBasePath  string // API endpoint base URL\n\tUserAgent string // optional additional User-Agent fragment\n\n\tDatasets *DatasetsService\n\n\tJobs *JobsService\n\n\tProjects *ProjectsService\n\n\tTabledata *TabledataService\n\n\tTables *TablesService\n}\n\nfunc (s *Service) userAgent() string {\n\tif s.UserAgent == \"\" {\n\t\treturn googleapi.UserAgent\n\t}\n\treturn googleapi.UserAgent + \" \" + s.UserAgent\n}\n\nfunc NewDatasetsService(s *Service) *DatasetsService {\n\trs := &DatasetsService{s: s}\n\treturn rs\n}\n\ntype DatasetsService struct {\n\ts *Service\n}\n\nfunc NewJobsService(s *Service) *JobsService {\n\trs := &JobsService{s: s}\n\treturn rs\n}\n\ntype JobsService struct {\n\ts *Service\n}\n\nfunc NewProjectsService(s *Service) *ProjectsService {\n\trs := &ProjectsService{s: s}\n\treturn rs\n}\n\ntype ProjectsService struct {\n\ts *Service\n}\n\nfunc NewTabledataService(s *Service) *TabledataService {\n\trs := &TabledataService{s: s}\n\treturn rs\n}\n\ntype TabledataService struct {\n\ts *Service\n}\n\nfunc NewTablesService(s *Service) *TablesService {\n\trs := &TablesService{s: s}\n\treturn rs\n}\n\ntype TablesService struct {\n\ts *Service\n}\n\ntype CsvOptions struct {\n\t// AllowJaggedRows: [Optional] Indicates if BigQuery should accept rows\n\t// that are missing trailing optional columns. If true, BigQuery treats\n\t// missing trailing columns as null values. 
If false, records with\n\t// missing trailing columns are treated as bad records, and if there are\n\t// too many bad records, an invalid error is returned in the job result.\n\t// The default value is false.\n\tAllowJaggedRows bool `json:\"allowJaggedRows,omitempty\"`\n\n\t// AllowQuotedNewlines: [Optional] Indicates if BigQuery should allow\n\t// quoted data sections that contain newline characters in a CSV file.\n\t// The default value is false.\n\tAllowQuotedNewlines bool `json:\"allowQuotedNewlines,omitempty\"`\n\n\t// Encoding: [Optional] The character encoding of the data. The\n\t// supported values are UTF-8 or ISO-8859-1. The default value is UTF-8.\n\t// BigQuery decodes the data after the raw, binary data has been split\n\t// using the values of the quote and fieldDelimiter properties.\n\tEncoding string `json:\"encoding,omitempty\"`\n\n\t// FieldDelimiter: [Optional] The separator for fields in a CSV file.\n\t// BigQuery converts the string to ISO-8859-1 encoding, and then uses\n\t// the first byte of the encoded string to split the data in its raw,\n\t// binary state. BigQuery also supports the escape sequence \"\\t\" to\n\t// specify a tab separator. The default value is a comma (',').\n\tFieldDelimiter string `json:\"fieldDelimiter,omitempty\"`\n\n\t// Quote: [Optional] The value that is used to quote data sections in a\n\t// CSV file. BigQuery converts the string to ISO-8859-1 encoding, and\n\t// then uses the first byte of the encoded string to split the data in\n\t// its raw, binary state. The default value is a double-quote ('\"'). If\n\t// your data does not contain quoted sections, set the property value to\n\t// an empty string. If your data contains quoted newline characters, you\n\t// must also set the allowQuotedNewlines property to true.\n\tQuote string `json:\"quote,omitempty\"`\n\n\t// SkipLeadingRows: [Optional] The number of rows at the top of a CSV\n\t// file that BigQuery will skip when reading the data. The default value\n\t// is 0. 
This property is useful if you have header rows in the file\n\t// that should be skipped.\n\tSkipLeadingRows int64 `json:\"skipLeadingRows,omitempty\"`\n}\n\ntype Dataset struct {\n\t// Access: [Optional] An array of objects that define dataset access for\n\t// one or more entities. You can set this property when inserting or\n\t// updating a dataset in order to control who is allowed to access the\n\t// data. If unspecified at dataset creation time, BigQuery adds default\n\t// dataset access for the following entities: access.specialGroup:\n\t// projectReaders; access.role: READER; access.specialGroup:\n\t// projectWriters; access.role: WRITER; access.specialGroup:\n\t// projectOwners; access.role: OWNER; access.userByEmail: [dataset\n\t// creator email]; access.role: OWNER;\n\tAccess []*DatasetAccess `json:\"access,omitempty\"`\n\n\t// CreationTime: [Output-only] The time when this dataset was created,\n\t// in milliseconds since the epoch.\n\tCreationTime int64 `json:\"creationTime,omitempty,string\"`\n\n\t// DatasetReference: [Required] A reference that identifies the dataset.\n\tDatasetReference *DatasetReference `json:\"datasetReference,omitempty\"`\n\n\t// DefaultTableExpirationMs: [Experimental] The default lifetime of all\n\t// tables in the dataset, in milliseconds. The minimum value is 3600000\n\t// milliseconds (one hour). Once this property is set, all newly-created\n\t// tables in the dataset will have an expirationTime property set to the\n\t// creation time plus the value in this property, and changing the value\n\t// will only affect new tables, not existing ones. When the\n\t// expirationTime for a given table is reached, that table will be\n\t// deleted automatically. 
If a table's expirationTime is modified or\n\t// removed before the table expires, or if you provide an explicit\n\t// expirationTime when creating a table, that value takes precedence\n\t// over the default expiration time indicated by this property.\n\tDefaultTableExpirationMs int64 `json:\"defaultTableExpirationMs,omitempty,string\"`\n\n\t// Description: [Optional] A user-friendly description of the dataset.\n\tDescription string `json:\"description,omitempty\"`\n\n\t// Etag: [Output-only] A hash of the resource.\n\tEtag string `json:\"etag,omitempty\"`\n\n\t// FriendlyName: [Optional] A descriptive name for the dataset.\n\tFriendlyName string `json:\"friendlyName,omitempty\"`\n\n\t// Id: [Output-only] The fully-qualified unique name of the dataset in\n\t// the format projectId:datasetId. The dataset name without the project\n\t// name is given in the datasetId field. When creating a new dataset,\n\t// leave this field blank, and instead specify the datasetId field.\n\tId string `json:\"id,omitempty\"`\n\n\t// Kind: [Output-only] The resource type.\n\tKind string `json:\"kind,omitempty\"`\n\n\t// LastModifiedTime: [Output-only] The date when this dataset or any of\n\t// its tables was last modified, in milliseconds since the epoch.\n\tLastModifiedTime int64 `json:\"lastModifiedTime,omitempty,string\"`\n\n\t// Location: [Experimental] The location where the data resides. If not\n\t// present, the data will be stored in the US.\n\tLocation string `json:\"location,omitempty\"`\n\n\t// SelfLink: [Output-only] A URL that can be used to access the resource\n\t// again. You can use this URL in Get or Update requests to the\n\t// resource.\n\tSelfLink string `json:\"selfLink,omitempty\"`\n}\n\ntype DatasetAccess struct {\n\t// Domain: [Pick one] A domain to grant access to. 
Any users signed in\n\t// with the domain specified will be granted the specified access.\n\t// Example: \"example.com\".\n\tDomain string `json:\"domain,omitempty\"`\n\n\t// GroupByEmail: [Pick one] An email address of a Google Group to grant\n\t// access to.\n\tGroupByEmail string `json:\"groupByEmail,omitempty\"`\n\n\t// Role: [Required] Describes the rights granted to the user specified\n\t// by the other member of the access object. The following string values\n\t// are supported: READER, WRITER, OWNER.\n\tRole string `json:\"role,omitempty\"`\n\n\t// SpecialGroup: [Pick one] A special group to grant access to. Possible\n\t// values include: projectOwners: Owners of the enclosing project.\n\t// projectReaders: Readers of the enclosing project. projectWriters:\n\t// Writers of the enclosing project. allAuthenticatedUsers: All\n\t// authenticated BigQuery users.\n\tSpecialGroup string `json:\"specialGroup,omitempty\"`\n\n\t// UserByEmail: [Pick one] An email address of a user to grant access\n\t// to. For example: fred@example.com.\n\tUserByEmail string `json:\"userByEmail,omitempty\"`\n\n\t// View: [Pick one] A view from a different dataset to grant access to.\n\t// Queries executed against that view will have read access to tables in\n\t// this dataset. The role field is not required when this field is set.\n\t// If that view is updated by any user, access to the view needs to be\n\t// granted again via an update operation.\n\tView *TableReference `json:\"view,omitempty\"`\n}\n\ntype DatasetList struct {\n\t// Datasets: An array of the dataset resources in the project. Each\n\t// resource contains basic information. For full information about a\n\t// particular dataset resource, use the Datasets: get method. This\n\t// property is omitted when there are no datasets in the project.\n\tDatasets []*DatasetListDatasets `json:\"datasets,omitempty\"`\n\n\t// Etag: A hash value of the results page. 
You can use this property to\n\t// determine if the page has changed since the last request.\n\tEtag string `json:\"etag,omitempty\"`\n\n\t// Kind: The list type. This property always returns the value\n\t// \"bigquery#datasetList\".\n\tKind string `json:\"kind,omitempty\"`\n\n\t// NextPageToken: A token that can be used to request the next results\n\t// page. This property is omitted on the final results page.\n\tNextPageToken string `json:\"nextPageToken,omitempty\"`\n}\n\ntype DatasetListDatasets struct {\n\t// DatasetReference: The dataset reference. Use this property to access\n\t// specific parts of the dataset's ID, such as project ID or dataset ID.\n\tDatasetReference *DatasetReference `json:\"datasetReference,omitempty\"`\n\n\t// FriendlyName: A descriptive name for the dataset, if one exists.\n\tFriendlyName string `json:\"friendlyName,omitempty\"`\n\n\t// Id: The fully-qualified, unique, opaque ID of the dataset.\n\tId string `json:\"id,omitempty\"`\n\n\t// Kind: The resource type. This property always returns the value\n\t// \"bigquery#dataset\".\n\tKind string `json:\"kind,omitempty\"`\n}\n\ntype DatasetReference struct {\n\t// DatasetId: [Required] A unique ID for this dataset, without the\n\t// project name. The ID must contain only letters (a-z, A-Z), numbers\n\t// (0-9), or underscores (_). The maximum length is 1,024 characters.\n\tDatasetId string `json:\"datasetId,omitempty\"`\n\n\t// ProjectId: [Optional] The ID of the project containing this dataset.\n\tProjectId string `json:\"projectId,omitempty\"`\n}\n\ntype ErrorProto struct {\n\t// DebugInfo: Debugging information. 
This property is internal to Google\n\t// and should not be used.\n\tDebugInfo string `json:\"debugInfo,omitempty\"`\n\n\t// Location: Specifies where the error occurred, if present.\n\tLocation string `json:\"location,omitempty\"`\n\n\t// Message: A human-readable description of the error.\n\tMessage string `json:\"message,omitempty\"`\n\n\t// Reason: A short error code that summarizes the error.\n\tReason string `json:\"reason,omitempty\"`\n}\n\ntype ExternalDataConfiguration struct {\n\t// Compression: [Optional] The compression type of the data source.\n\t// Possible values include GZIP and NONE. The default value is NONE.\n\tCompression string `json:\"compression,omitempty\"`\n\n\t// CsvOptions: Additional properties to set if sourceFormat is set to\n\t// CSV.\n\tCsvOptions *CsvOptions `json:\"csvOptions,omitempty\"`\n\n\t// IgnoreUnknownValues: [Optional] Indicates if BigQuery should allow\n\t// extra values that are not represented in the table schema. If true,\n\t// the extra values are ignored. If false, records with extra columns\n\t// are treated as bad records, and if there are too many bad records, an\n\t// invalid error is returned in the job result. The default value is\n\t// false. The sourceFormat property determines what BigQuery treats as\n\t// an extra value: CSV: Trailing columns\n\tIgnoreUnknownValues bool `json:\"ignoreUnknownValues,omitempty\"`\n\n\t// MaxBadRecords: [Optional] The maximum number of bad records that\n\t// BigQuery can ignore when reading data. If the number of bad records\n\t// exceeds this value, an invalid error is returned in the job result.\n\t// The default value is 0, which requires that all records are valid.\n\tMaxBadRecords int64 `json:\"maxBadRecords,omitempty\"`\n\n\t// Schema: [Required] The schema for the data.\n\tSchema *TableSchema `json:\"schema,omitempty\"`\n\n\t// SourceFormat: [Optional] The data format. External data sources must\n\t// be in CSV format. 
The default value is CSV.\n\tSourceFormat string `json:\"sourceFormat,omitempty\"`\n\n\t// SourceUris: [Required] The fully-qualified URIs that point to your\n\t// data in Google Cloud Storage. Each URI can contain one '*' wildcard\n\t// character and it must come after the 'bucket' name. CSV limits\n\t// related to load jobs apply to external data sources, plus an\n\t// additional limit of 10 GB maximum size across all URIs.\n\tSourceUris []string `json:\"sourceUris,omitempty\"`\n}\n\ntype GetQueryResultsResponse struct {\n\t// CacheHit: Whether the query result was fetched from the query cache.\n\tCacheHit bool `json:\"cacheHit,omitempty\"`\n\n\t// Etag: A hash of this response.\n\tEtag string `json:\"etag,omitempty\"`\n\n\t// JobComplete: Whether the query has completed or not. If rows or\n\t// totalRows are present, this will always be true. If this is false,\n\t// totalRows will not be available.\n\tJobComplete bool `json:\"jobComplete,omitempty\"`\n\n\t// JobReference: Reference to the BigQuery Job that was created to run\n\t// the query. This field will be present even if the original request\n\t// timed out, in which case GetQueryResults can be used to read the\n\t// results once the query has completed. Since this API only returns the\n\t// first page of results, subsequent pages can be fetched via the same\n\t// mechanism (GetQueryResults).\n\tJobReference *JobReference `json:\"jobReference,omitempty\"`\n\n\t// Kind: The resource type of the response.\n\tKind string `json:\"kind,omitempty\"`\n\n\t// PageToken: A token used for paging results.\n\tPageToken string `json:\"pageToken,omitempty\"`\n\n\t// Rows: An object with as many results as can be contained within the\n\t// maximum permitted reply size. To get any additional rows, you can\n\t// call GetQueryResults and specify the jobReference returned above.\n\t// Present only when the query completes successfully.\n\tRows []*TableRow `json:\"rows,omitempty\"`\n\n\t// Schema: The schema of the results. 
Present only when the query\n\t// completes successfully.\n\tSchema *TableSchema `json:\"schema,omitempty\"`\n\n\t// TotalBytesProcessed: The total number of bytes processed for this\n\t// query.\n\tTotalBytesProcessed int64 `json:\"totalBytesProcessed,omitempty,string\"`\n\n\t// TotalRows: The total number of rows in the complete query result set,\n\t// which can be more than the number of rows in this single page of\n\t// results. Present only when the query completes successfully.\n\tTotalRows uint64 `json:\"totalRows,omitempty,string\"`\n}\n\ntype Job struct {\n\t// Configuration: [Required] Describes the job configuration.\n\tConfiguration *JobConfiguration `json:\"configuration,omitempty\"`\n\n\t// Etag: [Output-only] A hash of this resource.\n\tEtag string `json:\"etag,omitempty\"`\n\n\t// Id: [Output-only] Opaque ID field of the job\n\tId string `json:\"id,omitempty\"`\n\n\t// JobReference: [Optional] Reference describing the unique-per-user\n\t// name of the job.\n\tJobReference *JobReference `json:\"jobReference,omitempty\"`\n\n\t// Kind: [Output-only] The type of the resource.\n\tKind string `json:\"kind,omitempty\"`\n\n\t// SelfLink: [Output-only] A URL that can be used to access this\n\t// resource again.\n\tSelfLink string `json:\"selfLink,omitempty\"`\n\n\t// Statistics: [Output-only] Information about the job, including\n\t// starting time and ending time of the job.\n\tStatistics *JobStatistics `json:\"statistics,omitempty\"`\n\n\t// Status: [Output-only] The status of this job. Examine this value when\n\t// polling an asynchronous job to see if the job is complete.\n\tStatus *JobStatus `json:\"status,omitempty\"`\n\n\t// UserEmail: [Output-only] Email address of the user who ran the job.\n\tUserEmail string `json:\"user_email,omitempty\"`\n}\n\ntype JobConfiguration struct {\n\t// Copy: [Pick one] Copies a table.\n\tCopy *JobConfigurationTableCopy `json:\"copy,omitempty\"`\n\n\t// DryRun: [Optional] If set, don't actually run this job. 
A valid query
	// will return a mostly empty response with some processing statistics,
	// while an invalid query will return the same error it would if it
	// wasn't a dry run. Behavior of non-query jobs is undefined.
	DryRun bool `json:"dryRun,omitempty"`

	// Extract: [Pick one] Configures an extract job.
	Extract *JobConfigurationExtract `json:"extract,omitempty"`

	// Link: [Pick one] Configures a link job.
	Link *JobConfigurationLink `json:"link,omitempty"`

	// Load: [Pick one] Configures a load job.
	Load *JobConfigurationLoad `json:"load,omitempty"`

	// Query: [Pick one] Configures a query job.
	Query *JobConfigurationQuery `json:"query,omitempty"`
}

type JobConfigurationExtract struct {
	// Compression: [Optional] The compression type to use for exported
	// files. Possible values include GZIP and NONE. The default value is
	// NONE.
	Compression string `json:"compression,omitempty"`

	// DestinationFormat: [Optional] The exported file format. Possible
	// values include CSV, NEWLINE_DELIMITED_JSON and AVRO. The default
	// value is CSV. Tables with nested or repeated fields cannot be
	// exported as CSV.
	DestinationFormat string `json:"destinationFormat,omitempty"`

	// DestinationUri: [Pick one] DEPRECATED: Use destinationUris instead,
	// passing only one URI as necessary. The fully-qualified Google Cloud
	// Storage URI where the extracted table should be written.
	DestinationUri string `json:"destinationUri,omitempty"`

	// DestinationUris: [Pick one] A list of fully-qualified Google Cloud
	// Storage URIs where the extracted table should be written.
	DestinationUris []string `json:"destinationUris,omitempty"`

	// FieldDelimiter: [Optional] Delimiter to use between fields in the
	// exported data. Default is ','.
	FieldDelimiter string `json:"fieldDelimiter,omitempty"`

	// PrintHeader: [Optional] Whether to print out a header row in the
	// results. Default is true.
	PrintHeader bool `json:"printHeader,omitempty"`

	// SourceTable: [Required] A reference to the table being exported.
	SourceTable *TableReference `json:"sourceTable,omitempty"`
}

type JobConfigurationLink struct {
	// CreateDisposition: [Optional] Specifies whether the job is allowed to
	// create new tables. The following values are supported:
	// CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
	// table. CREATE_NEVER: The table must already exist. If it does not, a
	// 'notFound' error is returned in the job result. The default value is
	// CREATE_IF_NEEDED. Creation, truncation and append actions occur as
	// one atomic update upon job completion.
	CreateDisposition string `json:"createDisposition,omitempty"`

	// DestinationTable: [Required] The destination table of the link job.
	DestinationTable *TableReference `json:"destinationTable,omitempty"`

	// SourceUri: [Required] URI of source table to link.
	SourceUri []string `json:"sourceUri,omitempty"`

	// WriteDisposition: [Optional] Specifies the action that occurs if the
	// destination table already exists. The following values are supported:
	// WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
	// table data. WRITE_APPEND: If the table already exists, BigQuery
	// appends the data to the table. WRITE_EMPTY: If the table already
	// exists and contains data, a 'duplicate' error is returned in the job
	// result. The default value is WRITE_EMPTY. Each action is atomic and
	// only occurs if BigQuery is able to complete the job successfully.
	// Creation, truncation and append actions occur as one atomic update
	// upon job completion.
	WriteDisposition string `json:"writeDisposition,omitempty"`
}

type JobConfigurationLoad struct {
	// AllowJaggedRows: [Optional] Accept rows that are missing trailing
	// optional columns. The missing values are treated as nulls. If false,
	// records with missing trailing columns are treated as bad records, and
	// if there are too many bad records, an invalid error is returned in
	// the job result. The default value is false. Only applicable to CSV,
	// ignored for other formats.
	AllowJaggedRows bool `json:"allowJaggedRows,omitempty"`

	// AllowQuotedNewlines: Indicates if BigQuery should allow quoted data
	// sections that contain newline characters in a CSV file. The default
	// value is false.
	AllowQuotedNewlines bool `json:"allowQuotedNewlines,omitempty"`

	// CreateDisposition: [Optional] Specifies whether the job is allowed to
	// create new tables. The following values are supported:
	// CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
	// table. CREATE_NEVER: The table must already exist. If it does not, a
	// 'notFound' error is returned in the job result. The default value is
	// CREATE_IF_NEEDED. Creation, truncation and append actions occur as
	// one atomic update upon job completion.
	CreateDisposition string `json:"createDisposition,omitempty"`

	// DestinationTable: [Required] The destination table to load the data
	// into.
	DestinationTable *TableReference `json:"destinationTable,omitempty"`

	// Encoding: [Optional] The character encoding of the data. The
	// supported values are UTF-8 or ISO-8859-1. The default value is UTF-8.
	// BigQuery decodes the data after the raw, binary data has been split
	// using the values of the quote and fieldDelimiter properties.
	Encoding string `json:"encoding,omitempty"`

	// FieldDelimiter: [Optional] The separator for fields in a CSV file.
	// BigQuery converts the string to ISO-8859-1 encoding, and then uses
	// the first byte of the encoded string to split the data in its raw,
	// binary state. BigQuery also supports the escape sequence "\t" to
	// specify a tab separator. The default value is a comma (',').
	FieldDelimiter string `json:"fieldDelimiter,omitempty"`

	// IgnoreUnknownValues: [Optional] Indicates if BigQuery should allow
	// extra values that are not represented in the table schema. If true,
	// the extra values are ignored. If false, records with extra columns
	// are treated as bad records, and if there are too many bad records, an
	// invalid error is returned in the job result. The default value is
	// false. The sourceFormat property determines what BigQuery treats as
	// an extra value: CSV: Trailing columns. JSON: Named values that don't
	// match any column names.
	IgnoreUnknownValues bool `json:"ignoreUnknownValues,omitempty"`

	// MaxBadRecords: [Optional] The maximum number of bad records that
	// BigQuery can ignore when running the job. If the number of bad
	// records exceeds this value, an invalid error is returned in the job
	// result. The default value is 0, which requires that all records are
	// valid.
	MaxBadRecords int64 `json:"maxBadRecords,omitempty"`

	// ProjectionFields: [Experimental] If sourceFormat is set to
	// "DATASTORE_BACKUP", indicates which entity properties to load into
	// BigQuery from a Cloud Datastore backup. Property names are case
	// sensitive and must be top-level properties. If no properties are
	// specified, BigQuery loads all properties. If any named property isn't
	// found in the Cloud Datastore backup, an invalid error is returned in
	// the job result.
	ProjectionFields []string `json:"projectionFields,omitempty"`

	// Quote: [Optional] The value that is used to quote data sections in a
	// CSV file. BigQuery converts the string to ISO-8859-1 encoding, and
	// then uses the first byte of the encoded string to split the data in
	// its raw, binary state. The default value is a double-quote ('"'). If
	// your data does not contain quoted sections, set the property value to
	// an empty string. If your data contains quoted newline characters, you
	// must also set the allowQuotedNewlines property to true.
	Quote string `json:"quote,omitempty"`

	// Schema: [Optional] The schema for the destination table. The schema
	// can be omitted if the destination table already exists or if the
	// schema can be inferred from the loaded data.
	Schema *TableSchema `json:"schema,omitempty"`

	// SchemaInline: [Deprecated] The inline schema. For CSV schemas,
	// specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING,
	// bar:INTEGER, baz:FLOAT".
	SchemaInline string `json:"schemaInline,omitempty"`

	// SchemaInlineFormat: [Deprecated] The format of the schemaInline
	// property.
	SchemaInlineFormat string `json:"schemaInlineFormat,omitempty"`

	// SkipLeadingRows: [Optional] The number of rows at the top of a CSV
	// file that BigQuery will skip when loading the data. The default value
	// is 0. This property is useful if you have header rows in the file
	// that should be skipped.
	SkipLeadingRows int64 `json:"skipLeadingRows,omitempty"`

	// SourceFormat: [Optional] The format of the data files. For CSV files,
	// specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For
	// newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". The default
	// value is CSV.
	SourceFormat string `json:"sourceFormat,omitempty"`

	// SourceUris: [Required] The fully-qualified URIs that point to your
	// data in Google Cloud Storage. Each URI can contain one '*' wildcard
	// character and it must come after the 'bucket' name.
	SourceUris []string `json:"sourceUris,omitempty"`

	// WriteDisposition: [Optional] Specifies the action that occurs if the
	// destination table already exists. The following values are supported:
	// WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
	// table data. 
WRITE_APPEND: If the table already exists, BigQuery
	// appends the data to the table. WRITE_EMPTY: If the table already
	// exists and contains data, a 'duplicate' error is returned in the job
	// result. The default value is WRITE_EMPTY. Each action is atomic and
	// only occurs if BigQuery is able to complete the job successfully.
	// Creation, truncation and append actions occur as one atomic update
	// upon job completion.
	WriteDisposition string `json:"writeDisposition,omitempty"`
}

type JobConfigurationQuery struct {
	// AllowLargeResults: If true, allows the query to produce arbitrarily
	// large result tables at a slight cost in performance. Requires
	// destinationTable to be set.
	AllowLargeResults bool `json:"allowLargeResults,omitempty"`

	// CreateDisposition: [Optional] Specifies whether the job is allowed to
	// create new tables. The following values are supported:
	// CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
	// table. CREATE_NEVER: The table must already exist. If it does not, a
	// 'notFound' error is returned in the job result. The default value is
	// CREATE_IF_NEEDED. Creation, truncation and append actions occur as
	// one atomic update upon job completion.
	CreateDisposition string `json:"createDisposition,omitempty"`

	// DefaultDataset: [Optional] Specifies the default dataset to use for
	// unqualified table names in the query.
	DefaultDataset *DatasetReference `json:"defaultDataset,omitempty"`

	// DestinationTable: [Optional] Describes the table where the query
	// results should be stored. If not present, a new table will be created
	// to store the results.
	DestinationTable *TableReference `json:"destinationTable,omitempty"`

	// FlattenResults: [Optional] Flattens all nested and repeated fields in
	// the query results. The default value is true. allowLargeResults must
	// be true if this is set to false.
	FlattenResults bool `json:"flattenResults,omitempty"`

	// PreserveNulls: [Deprecated] This property is deprecated.
	PreserveNulls bool `json:"preserveNulls,omitempty"`

	// Priority: [Optional] Specifies a priority for the query. Possible
	// values include INTERACTIVE and BATCH. The default value is
	// INTERACTIVE.
	Priority string `json:"priority,omitempty"`

	// Query: [Required] BigQuery SQL query to execute.
	Query string `json:"query,omitempty"`

	// TableDefinitions: [Experimental] If querying an external data source
	// outside of BigQuery, describes the data format, location and other
	// properties of the data source. By defining these properties, the data
	// source can then be queried as if it were a standard BigQuery table.
	TableDefinitions map[string]ExternalDataConfiguration `json:"tableDefinitions,omitempty"`

	// UseQueryCache: [Optional] Whether to look for the result in the query
	// cache. The query cache is a best-effort cache that will be flushed
	// whenever tables in the query are modified. Moreover, the query cache
	// is only available when a query does not have a destination table
	// specified.
	UseQueryCache bool `json:"useQueryCache,omitempty"`

	// WriteDisposition: [Optional] Specifies the action that occurs if the
	// destination table already exists. The following values are supported:
	// WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
	// table data. WRITE_APPEND: If the table already exists, BigQuery
	// appends the data to the table. WRITE_EMPTY: If the table already
	// exists and contains data, a 'duplicate' error is returned in the job
	// result. The default value is WRITE_EMPTY. Each action is atomic and
	// only occurs if BigQuery is able to complete the job successfully.
	// Creation, truncation and append actions occur as one atomic update
	// upon job completion.
	WriteDisposition string `json:"writeDisposition,omitempty"`
}

type JobConfigurationTableCopy struct {
	// CreateDisposition: [Optional] Specifies whether the job is allowed to
	// create new tables. The following values are supported:
	// CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
	// table. CREATE_NEVER: The table must already exist. If it does not, a
	// 'notFound' error is returned in the job result. The default value is
	// CREATE_IF_NEEDED. Creation, truncation and append actions occur as
	// one atomic update upon job completion.
	CreateDisposition string `json:"createDisposition,omitempty"`

	// DestinationTable: [Required] The destination table.
	DestinationTable *TableReference `json:"destinationTable,omitempty"`

	// SourceTable: [Pick one] Source table to copy.
	SourceTable *TableReference `json:"sourceTable,omitempty"`

	// SourceTables: [Pick one] Source tables to copy.
	SourceTables []*TableReference `json:"sourceTables,omitempty"`

	// WriteDisposition: [Optional] Specifies the action that occurs if the
	// destination table already exists. The following values are supported:
	// WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
	// table data. WRITE_APPEND: If the table already exists, BigQuery
	// appends the data to the table. WRITE_EMPTY: If the table already
	// exists and contains data, a 'duplicate' error is returned in the job
	// result. The default value is WRITE_EMPTY. Each action is atomic and
	// only occurs if BigQuery is able to complete the job successfully.
	// Creation, truncation and append actions occur as one atomic update
	// upon job completion.
	WriteDisposition string `json:"writeDisposition,omitempty"`
}

type JobList struct {
	// Etag: A hash of this page of results.
	Etag string `json:"etag,omitempty"`

	// Jobs: List of jobs that were requested.
	Jobs []*JobListJobs `json:"jobs,omitempty"`

	// Kind: The resource type of the response.
	Kind string `json:"kind,omitempty"`

	// NextPageToken: A token to request the next page of results.
	NextPageToken string `json:"nextPageToken,omitempty"`

	// TotalItems: Total number of jobs in this collection.
	TotalItems int64 `json:"totalItems,omitempty"`
}

type JobListJobs struct {
	// Configuration: [Full-projection-only] Specifies the job
	// configuration.
	Configuration *JobConfiguration `json:"configuration,omitempty"`

	// ErrorResult: A result object that will be present only if the job has
	// failed.
	ErrorResult *ErrorProto `json:"errorResult,omitempty"`

	// Id: Unique opaque ID of the job.
	Id string `json:"id,omitempty"`

	// JobReference: Job reference uniquely identifying the job.
	JobReference *JobReference `json:"jobReference,omitempty"`

	// Kind: The resource type.
	Kind string `json:"kind,omitempty"`

	// State: Running state of the job. 
When the state is DONE, errorResult
	// can be checked to determine whether the job succeeded or failed.
	State string `json:"state,omitempty"`

	// Statistics: [Output-only] Information about the job, including
	// starting time and ending time of the job.
	Statistics *JobStatistics `json:"statistics,omitempty"`

	// Status: [Full-projection-only] Describes the state of the job.
	Status *JobStatus `json:"status,omitempty"`

	// UserEmail: [Full-projection-only] Email address of the user who ran
	// the job.
	UserEmail string `json:"user_email,omitempty"`
}

type JobReference struct {
	// JobId: [Required] The ID of the job. The ID must contain only letters
	// (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The
	// maximum length is 1,024 characters.
	JobId string `json:"jobId,omitempty"`

	// ProjectId: [Required] The ID of the project containing this job.
	ProjectId string `json:"projectId,omitempty"`
}

type JobStatistics struct {
	// CreationTime: [Output-only] Creation time of this job, in
	// milliseconds since the epoch. This field will be present on all jobs.
	CreationTime int64 `json:"creationTime,omitempty,string"`

	// EndTime: [Output-only] End time of this job, in milliseconds since
	// the epoch. This field will be present whenever a job is in the DONE
	// state.
	EndTime int64 `json:"endTime,omitempty,string"`

	// Extract: [Output-only] Statistics for an extract job.
	Extract *JobStatistics4 `json:"extract,omitempty"`

	// Load: [Output-only] Statistics for a load job.
	Load *JobStatistics3 `json:"load,omitempty"`

	// Query: [Output-only] Statistics for a query job.
	Query *JobStatistics2 `json:"query,omitempty"`

	// StartTime: [Output-only] Start time of this job, in milliseconds
	// since the epoch. This field will be present when the job transitions
	// from the PENDING state to either RUNNING or DONE.
	StartTime int64 `json:"startTime,omitempty,string"`

	// TotalBytesProcessed: [Output-only] [Deprecated] Use the bytes
	// processed in the query statistics instead.
	TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"`
}

type JobStatistics2 struct {
	// CacheHit: [Output-only] Whether the query result was fetched from the
	// query cache.
	CacheHit bool `json:"cacheHit,omitempty"`

	// TotalBytesProcessed: [Output-only] Total bytes processed for this
	// job.
	TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"`
}

type JobStatistics3 struct {
	// InputFileBytes: [Output-only] Number of bytes of source data in a
	// load job.
	InputFileBytes int64 `json:"inputFileBytes,omitempty,string"`

	// InputFiles: [Output-only] Number of source files in a load job.
	InputFiles int64 `json:"inputFiles,omitempty,string"`

	// OutputBytes: [Output-only] Size of the loaded data in bytes. Note
	// that while an import job is in the running state, this value may
	// change.
	OutputBytes int64 `json:"outputBytes,omitempty,string"`

	// OutputRows: [Output-only] Number of rows imported in a load job. Note
	// that while an import job is in the running state, this value may
	// change.
	OutputRows int64 `json:"outputRows,omitempty,string"`
}

type JobStatistics4 struct {
	// DestinationUriFileCounts: [Experimental] Number of files per
	// destination URI or URI pattern specified in the extract
	// configuration. These values will be in the same order as the URIs
	// specified in the 'destinationUris' field.
	DestinationUriFileCounts googleapi.Int64s `json:"destinationUriFileCounts,omitempty"`
}

type JobStatus struct {
	// ErrorResult: [Output-only] Final error result of the job. If present,
	// indicates that the job has completed and was unsuccessful.
	ErrorResult *ErrorProto `json:"errorResult,omitempty"`

	// Errors: [Output-only] All errors encountered during the running of
	// the job. Errors here do not necessarily mean that the job has
	// completed or was unsuccessful.
	Errors []*ErrorProto `json:"errors,omitempty"`

	// State: [Output-only] Running state of the job.
	State string `json:"state,omitempty"`
}

type JsonValue interface{}

type ProjectList struct {
	// Etag: A hash of the page of results.
	Etag string `json:"etag,omitempty"`

	// Kind: The type of list.
	Kind string `json:"kind,omitempty"`

	// NextPageToken: A token to request the next page of results.
	NextPageToken string `json:"nextPageToken,omitempty"`

	// Projects: Projects to which you have at least READ access.
	Projects []*ProjectListProjects `json:"projects,omitempty"`

	// TotalItems: The total number of projects in the list.
	TotalItems int64 `json:"totalItems,omitempty"`
}

type ProjectListProjects struct {
	// FriendlyName: A descriptive name for this project.
	FriendlyName string `json:"friendlyName,omitempty"`

	// Id: An opaque ID of this project.
	Id string `json:"id,omitempty"`

	// Kind: The resource type.
	Kind string `json:"kind,omitempty"`

	// NumericId: The numeric ID of this project.
	NumericId uint64 `json:"numericId,omitempty,string"`

	// ProjectReference: A unique reference to this project.
	ProjectReference *ProjectReference `json:"projectReference,omitempty"`
}

type ProjectReference struct {
	// ProjectId: [Required] ID of the project. Can be either the numeric ID
	// or the assigned ID of the project.
	ProjectId string `json:"projectId,omitempty"`
}

type QueryRequest struct {
	// DefaultDataset: [Optional] Specifies the default datasetId and
	// projectId to assume for any unqualified table names in the query. 
If
	// not set, all table names in the query string must be qualified in the
	// format 'datasetId.tableId'.
	DefaultDataset *DatasetReference `json:"defaultDataset,omitempty"`

	// DryRun: [Optional] If set, don't actually run this job. A valid query
	// will return a mostly empty response with some processing statistics,
	// while an invalid query will return the same error it would if it
	// wasn't a dry run.
	DryRun bool `json:"dryRun,omitempty"`

	// Kind: The resource type of the request.
	Kind string `json:"kind,omitempty"`

	// MaxResults: [Optional] The maximum number of rows of data to return
	// per page of results. Setting this flag to a small value such as 1000
	// and then paging through results might improve reliability when the
	// query result set is large. In addition to this limit, responses are
	// also limited to 10 MB. By default, there is no maximum row count, and
	// only the byte limit applies.
	MaxResults int64 `json:"maxResults,omitempty"`

	// PreserveNulls: [Deprecated] This property is deprecated.
	PreserveNulls bool `json:"preserveNulls,omitempty"`

	// Query: [Required] A query string, following the BigQuery query
	// syntax, of the query to execute. Example: "SELECT count(f1) FROM
	// [myProjectId:myDatasetId.myTableId]".
	Query string `json:"query,omitempty"`

	// TimeoutMs: [Optional] How long to wait for the query to complete, in
	// milliseconds, before the request times out and returns. Note that
	// this is only a timeout for the request, not the query. If the query
	// takes longer to run than the timeout value, the call returns without
	// any results and with the 'jobComplete' flag set to false. You can
	// call GetQueryResults() to wait for the query to complete and read the
	// results. The default value is 10000 milliseconds (10 seconds).
	TimeoutMs int64 `json:"timeoutMs,omitempty"`

	// UseQueryCache: [Optional] Whether to look for the result in the query
	// cache. The query cache is a best-effort cache that will be flushed
	// whenever tables in the query are modified. The default value is true.
	UseQueryCache bool `json:"useQueryCache,omitempty"`
}

type QueryResponse struct {
	// CacheHit: Whether the query result was fetched from the query cache.
	CacheHit bool `json:"cacheHit,omitempty"`

	// JobComplete: Whether the query has completed or not. If rows or
	// totalRows are present, this will always be true. If this is false,
	// totalRows will not be available.
	JobComplete bool `json:"jobComplete,omitempty"`

	// JobReference: Reference to the Job that was created to run the query.
	// This field will be present even if the original request timed out, in
	// which case GetQueryResults can be used to read the results once the
	// query has completed. Since this API only returns the first page of
	// results, subsequent pages can be fetched via the same mechanism
	// (GetQueryResults).
	JobReference *JobReference `json:"jobReference,omitempty"`

	// Kind: The resource type.
	Kind string `json:"kind,omitempty"`

	// PageToken: A token used for paging results.
	PageToken string `json:"pageToken,omitempty"`

	// Rows: An object with as many results as can be contained within the
	// maximum permitted reply size. To get any additional rows, you can
	// call GetQueryResults and specify the jobReference returned above.
	Rows []*TableRow `json:"rows,omitempty"`

	// Schema: The schema of the results. Present only when the query
	// completes successfully.
	Schema *TableSchema `json:"schema,omitempty"`

	// TotalBytesProcessed: The total number of bytes processed for this
	// query. If this query was a dry run, this is the number of bytes that
	// would be processed if the query were run.
	TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"`

	// TotalRows: The total number of rows in the complete query result set,
	// which can be more than the number of rows in this single page of
	// results.
	TotalRows uint64 `json:"totalRows,omitempty,string"`
}

type Table struct {
	// CreationTime: [Output-only] The time when this table was created, in
	// milliseconds since the epoch.
	CreationTime int64 `json:"creationTime,omitempty,string"`

	// Description: [Optional] A user-friendly description of this table.
	Description string `json:"description,omitempty"`

	// Etag: [Output-only] A hash of this resource.
	Etag string `json:"etag,omitempty"`

	// ExpirationTime: [Optional] The time when this table expires, in
	// milliseconds since the epoch. If not present, the table will persist
	// indefinitely. Expired tables will be deleted and their storage
	// reclaimed.
	ExpirationTime int64 `json:"expirationTime,omitempty,string"`

	// FriendlyName: [Optional] A descriptive name for this table.
	FriendlyName string `json:"friendlyName,omitempty"`

	// Id: [Output-only] An opaque ID uniquely identifying the table.
	Id string `json:"id,omitempty"`

	// Kind: [Output-only] The type of the resource.
	Kind string `json:"kind,omitempty"`

	// LastModifiedTime: [Output-only] The time when this table was last
	// modified, in milliseconds since the epoch.
	LastModifiedTime uint64 `json:"lastModifiedTime,omitempty,string"`

	// NumBytes: [Output-only] The size of the table in bytes. This property
	// is unavailable for tables that are actively receiving streaming
	// inserts.
	NumBytes int64 `json:"numBytes,omitempty,string"`

	// NumRows: [Output-only] The number of rows of data in this table. This
	// property is unavailable for tables that are actively receiving
	// streaming inserts.
	NumRows uint64 `json:"numRows,omitempty,string"`

	// Schema: [Optional] Describes the schema of this table.
	Schema *TableSchema `json:"schema,omitempty"`

	// SelfLink: [Output-only] A URL that can be used to access this
	// resource again.
	SelfLink string `json:"selfLink,omitempty"`

	// TableReference: [Required] Reference describing the ID of this table.
	TableReference *TableReference `json:"tableReference,omitempty"`

	// Type: [Output-only] Describes the table type. The following values
	// are supported: TABLE: A normal BigQuery table. VIEW: A virtual table
	// defined by a SQL query. The default value is TABLE.
	Type string `json:"type,omitempty"`

	// View: [Optional] The view definition.
	View *ViewDefinition `json:"view,omitempty"`
}

// TableCell represents a single cell of a TableRow.
type TableCell struct {
	// V: The value of this cell.
	V interface{} `json:"v,omitempty"`
}

type TableDataInsertAllRequest struct {
	// IgnoreUnknownValues: [Optional] Accept rows that contain values that
	// do not match the schema. The unknown values are ignored. Default is
	// false, which treats unknown values as errors.
	IgnoreUnknownValues bool `json:"ignoreUnknownValues,omitempty"`

	// Kind: The resource type of the response.
	Kind string `json:"kind,omitempty"`

	// Rows: The rows to insert.
	Rows []*TableDataInsertAllRequestRows `json:"rows,omitempty"`

	// SkipInvalidRows: [Optional] Insert all valid rows of a request, even
	// if invalid rows exist. The default value is false, which causes the
	// entire request to fail if any invalid rows exist.
	SkipInvalidRows bool `json:"skipInvalidRows,omitempty"`
}

type TableDataInsertAllRequestRows struct {
	// InsertId: [Optional] A unique ID for each row. 
BigQuery uses this
	// property to detect duplicate insertion requests on a best-effort
	// basis.
	InsertId string `json:"insertId,omitempty"`

	// Json: [Required] A JSON object that contains a row of data. The
	// object's properties and values must match the destination table's
	// schema.
	Json map[string]JsonValue `json:"json,omitempty"`
}

type TableDataInsertAllResponse struct {
	// InsertErrors: An array of errors for rows that were not inserted.
	InsertErrors []*TableDataInsertAllResponseInsertErrors `json:"insertErrors,omitempty"`

	// Kind: The resource type of the response.
	Kind string `json:"kind,omitempty"`
}

type TableDataInsertAllResponseInsertErrors struct {
	// Errors: Error information for the row indicated by the index
	// property.
	Errors []*ErrorProto `json:"errors,omitempty"`

	// Index: The index of the row that error applies to.
	Index int64 `json:"index,omitempty"`
}

type TableDataList struct {
	// Etag: A hash of this page of results.
	Etag string `json:"etag,omitempty"`

	// Kind: The resource type of the response.
	Kind string `json:"kind,omitempty"`

	// PageToken: A token used for paging results. Providing this token
	// instead of the startIndex parameter can help you retrieve stable
	// results when an underlying table is changing.
	PageToken string `json:"pageToken,omitempty"`

	// Rows: Rows of results.
	Rows []*TableRow `json:"rows,omitempty"`

	// TotalRows: The total number of rows in the complete table.
	TotalRows int64 `json:"totalRows,omitempty,string"`
}

type TableFieldSchema struct {
	// Description: [Optional] The field description. The maximum length is
	// 16K characters.
	Description string `json:"description,omitempty"`

	// Fields: [Optional] Describes the nested schema fields if the type
	// property is set to RECORD.
	Fields []*TableFieldSchema `json:"fields,omitempty"`

	// Mode: [Optional] The field mode. Possible values include NULLABLE,
	// REQUIRED and REPEATED. The default value is NULLABLE.
	Mode string `json:"mode,omitempty"`

	// Name: [Required] The field name. The name must contain only letters
	// (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a
	// letter or underscore. The maximum length is 128 characters.
	Name string `json:"name,omitempty"`

	// Type: [Required] The field data type. Possible values include STRING,
	// INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates
	// that the field contains a nested schema).
	Type string `json:"type,omitempty"`
}

type TableList struct {
	// Etag: A hash of this page of results.
	Etag string `json:"etag,omitempty"`

	// Kind: The type of list.
	Kind string `json:"kind,omitempty"`

	// NextPageToken: A token to request the next page of results.
	NextPageToken string `json:"nextPageToken,omitempty"`

	// Tables: Tables in the requested dataset.
	Tables []*TableListTables `json:"tables,omitempty"`

	// TotalItems: The total number of tables in the dataset.
	TotalItems int64 `json:"totalItems,omitempty"`
}

type TableListTables struct {
	// FriendlyName: The user-friendly name for this table.
	FriendlyName string `json:"friendlyName,omitempty"`

	// Id: An opaque ID of the table.
	Id string `json:"id,omitempty"`

	// Kind: The resource type.
	Kind string `json:"kind,omitempty"`

	// TableReference: A reference uniquely identifying the table.
	TableReference *TableReference `json:"tableReference,omitempty"`

	// Type: The type of table. Possible values are: TABLE, VIEW.
	Type string `json:"type,omitempty"`
}

type TableReference struct {
	// DatasetId: [Required] The ID of the dataset containing this table.
	DatasetId string `json:"datasetId,omitempty"`

	// ProjectId: [Required] The ID of the project containing this table.
	ProjectId string `json:"projectId,omitempty"`

	// TableId: [Required] The ID of the table. The ID must contain only
	// letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum
	// length is 1,024 characters.
	TableId string `json:"tableId,omitempty"`
}

// TableRow represents a single row of table or query-result data, as an
// ordered list of cells.
type TableRow struct {
	F []*TableCell `json:"f,omitempty"`
}

type TableSchema struct {
	// Fields: Describes the fields in a table.
	Fields []*TableFieldSchema `json:"fields,omitempty"`
}

type ViewDefinition struct {
	// Query: [Required] A query that BigQuery executes when the view is
	// referenced.
	Query string `json:"query,omitempty"`
}

// method id "bigquery.datasets.delete":

type DatasetsDeleteCall struct {
	s         *Service
	projectId string
	datasetId string
	opt_      map[string]interface{}
}

// Delete: Deletes the dataset specified by the datasetId value. Before
// you can delete a dataset, you must delete all its tables, either
// manually or by specifying deleteContents. Immediately after deletion,
// you can create another dataset with the same name.
func (r *DatasetsService) Delete(projectId string, datasetId string) *DatasetsDeleteCall {
	c := &DatasetsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
	c.projectId = projectId
	c.datasetId = datasetId
	return c
}

// DeleteContents sets the optional parameter "deleteContents": If True,
// delete all the tables in the dataset. If False and the dataset
// contains tables, the request will fail. 
Default is False\nfunc (c *DatasetsDeleteCall) DeleteContents(deleteContents bool) *DatasetsDeleteCall {\n\tc.opt_[\"deleteContents\"] = deleteContents\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *DatasetsDeleteCall) Fields(s ...googleapi.Field) *DatasetsDeleteCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *DatasetsDeleteCall) Do() error {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"deleteContents\"]; ok {\n\t\tparams.Set(\"deleteContents\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/datasets/{datasetId}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"DELETE\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"datasetId\": c.datasetId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\t// {\n\t//   \"description\": \"Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. 
Immediately after deletion, you can create another dataset with the same name.\",\n\t//   \"httpMethod\": \"DELETE\",\n\t//   \"id\": \"bigquery.datasets.delete\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"datasetId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"datasetId\": {\n\t//       \"description\": \"Dataset ID of dataset being deleted\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"deleteContents\": {\n\t//       \"description\": \"If True, delete all the tables in the dataset. If False and the dataset contains tables, the request will fail. Default is False\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"boolean\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the dataset being deleted\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/datasets/{datasetId}\",\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.datasets.get\":\n\ntype DatasetsGetCall struct {\n\ts         *Service\n\tprojectId string\n\tdatasetId string\n\topt_      map[string]interface{}\n}\n\n// Get: Returns the dataset specified by datasetID.\nfunc (r *DatasetsService) Get(projectId string, datasetId string) *DatasetsGetCall {\n\tc := &DatasetsGetCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.datasetId = datasetId\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *DatasetsGetCall) Fields(s ...googleapi.Field) *DatasetsGetCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c 
*DatasetsGetCall) Do() (*Dataset, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/datasets/{datasetId}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"datasetId\": c.datasetId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Dataset\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Returns the dataset specified by datasetID.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"bigquery.datasets.get\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"datasetId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"datasetId\": {\n\t//       \"description\": \"Dataset ID of the requested dataset\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the requested dataset\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/datasets/{datasetId}\",\n\t//   \"response\": {\n\t//     \"$ref\": \"Dataset\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.datasets.insert\":\n\ntype DatasetsInsertCall struct {\n\ts   
      *Service\n\tprojectId string\n\tdataset   *Dataset\n\topt_      map[string]interface{}\n}\n\n// Insert: Creates a new empty dataset.\nfunc (r *DatasetsService) Insert(projectId string, dataset *Dataset) *DatasetsInsertCall {\n\tc := &DatasetsInsertCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.dataset = dataset\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *DatasetsInsertCall) Fields(s ...googleapi.Field) *DatasetsInsertCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *DatasetsInsertCall) Do() (*Dataset, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/datasets\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Dataset\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Creates a new empty dataset.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"bigquery.datasets.insert\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     
\"projectId\": {\n\t//       \"description\": \"Project ID of the new dataset\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/datasets\",\n\t//   \"request\": {\n\t//     \"$ref\": \"Dataset\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Dataset\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.datasets.list\":\n\ntype DatasetsListCall struct {\n\ts         *Service\n\tprojectId string\n\topt_      map[string]interface{}\n}\n\n// List: Lists all datasets in the specified project to which you have\n// been granted the READER dataset role.\nfunc (r *DatasetsService) List(projectId string) *DatasetsListCall {\n\tc := &DatasetsListCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\treturn c\n}\n\n// All sets the optional parameter \"all\": Whether to list all datasets,\n// including hidden ones\nfunc (c *DatasetsListCall) All(all bool) *DatasetsListCall {\n\tc.opt_[\"all\"] = all\n\treturn c\n}\n\n// MaxResults sets the optional parameter \"maxResults\": The maximum\n// number of results to return\nfunc (c *DatasetsListCall) MaxResults(maxResults int64) *DatasetsListCall {\n\tc.opt_[\"maxResults\"] = maxResults\n\treturn c\n}\n\n// PageToken sets the optional parameter \"pageToken\": Page token,\n// returned by a previous call, to request the next page of results\nfunc (c *DatasetsListCall) PageToken(pageToken string) *DatasetsListCall {\n\tc.opt_[\"pageToken\"] = pageToken\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *DatasetsListCall) Fields(s ...googleapi.Field) *DatasetsListCall {\n\tc.opt_[\"fields\"] = 
googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *DatasetsListCall) Do() (*DatasetList, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"all\"]; ok {\n\t\tparams.Set(\"all\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"maxResults\"]; ok {\n\t\tparams.Set(\"maxResults\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"pageToken\"]; ok {\n\t\tparams.Set(\"pageToken\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/datasets\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *DatasetList\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Lists all datasets in the specified project to which you have been granted the READER dataset role.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"bigquery.datasets.list\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"all\": {\n\t//       \"description\": \"Whether to list all datasets, including hidden ones\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"boolean\"\n\t//     },\n\t//     \"maxResults\": {\n\t//       \"description\": \"The maximum number of results to return\",\n\t//       \"format\": \"uint32\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"integer\"\n\t//     },\n\t//     \"pageToken\": {\n\t//       
\"description\": \"Page token, returned by a previous call, to request the next page of results\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the datasets to be listed\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/datasets\",\n\t//   \"response\": {\n\t//     \"$ref\": \"DatasetList\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.datasets.patch\":\n\ntype DatasetsPatchCall struct {\n\ts         *Service\n\tprojectId string\n\tdatasetId string\n\tdataset   *Dataset\n\topt_      map[string]interface{}\n}\n\n// Patch: Updates information in an existing dataset. The update method\n// replaces the entire dataset resource, whereas the patch method only\n// replaces fields that are provided in the submitted dataset resource.\n// This method supports patch semantics.\nfunc (r *DatasetsService) Patch(projectId string, datasetId string, dataset *Dataset) *DatasetsPatchCall {\n\tc := &DatasetsPatchCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.datasetId = datasetId\n\tc.dataset = dataset\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *DatasetsPatchCall) Fields(s ...googleapi.Field) *DatasetsPatchCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *DatasetsPatchCall) Do() (*Dataset, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := 
make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/datasets/{datasetId}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"PATCH\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"datasetId\": c.datasetId,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Dataset\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource. 
This method supports patch semantics.\",\n\t//   \"httpMethod\": \"PATCH\",\n\t//   \"id\": \"bigquery.datasets.patch\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"datasetId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"datasetId\": {\n\t//       \"description\": \"Dataset ID of the dataset being updated\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the dataset being updated\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/datasets/{datasetId}\",\n\t//   \"request\": {\n\t//     \"$ref\": \"Dataset\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Dataset\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.datasets.update\":\n\ntype DatasetsUpdateCall struct {\n\ts         *Service\n\tprojectId string\n\tdatasetId string\n\tdataset   *Dataset\n\topt_      map[string]interface{}\n}\n\n// Update: Updates information in an existing dataset. 
The update method\n// replaces the entire dataset resource, whereas the patch method only\n// replaces fields that are provided in the submitted dataset resource.\nfunc (r *DatasetsService) Update(projectId string, datasetId string, dataset *Dataset) *DatasetsUpdateCall {\n\tc := &DatasetsUpdateCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.datasetId = datasetId\n\tc.dataset = dataset\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *DatasetsUpdateCall) Fields(s ...googleapi.Field) *DatasetsUpdateCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *DatasetsUpdateCall) Do() (*Dataset, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/datasets/{datasetId}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"PUT\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"datasetId\": c.datasetId,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Dataset\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Updates information in an existing dataset. 
The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.\",\n\t//   \"httpMethod\": \"PUT\",\n\t//   \"id\": \"bigquery.datasets.update\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"datasetId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"datasetId\": {\n\t//       \"description\": \"Dataset ID of the dataset being updated\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the dataset being updated\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/datasets/{datasetId}\",\n\t//   \"request\": {\n\t//     \"$ref\": \"Dataset\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Dataset\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.jobs.get\":\n\ntype JobsGetCall struct {\n\ts         *Service\n\tprojectId string\n\tjobId     string\n\topt_      map[string]interface{}\n}\n\n// Get: Returns information about a specific job. Job information is\n// available for a six month period after creation. 
Requires that you're\n// the person who ran the job, or have the Is Owner project role.\nfunc (r *JobsService) Get(projectId string, jobId string) *JobsGetCall {\n\tc := &JobsGetCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.jobId = jobId\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *JobsGetCall) Fields(s ...googleapi.Field) *JobsGetCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *JobsGetCall) Do() (*Job, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/jobs/{jobId}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"jobId\":     c.jobId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Job\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Returns information about a specific job. Job information is available for a six month period after creation. 
Requires that you're the person who ran the job, or have the Is Owner project role.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"bigquery.jobs.get\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"jobId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"jobId\": {\n\t//       \"description\": \"Job ID of the requested job\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the requested job\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/jobs/{jobId}\",\n\t//   \"response\": {\n\t//     \"$ref\": \"Job\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.jobs.getQueryResults\":\n\ntype JobsGetQueryResultsCall struct {\n\ts         *Service\n\tprojectId string\n\tjobId     string\n\topt_      map[string]interface{}\n}\n\n// GetQueryResults: Retrieves the results of a query job.\nfunc (r *JobsService) GetQueryResults(projectId string, jobId string) *JobsGetQueryResultsCall {\n\tc := &JobsGetQueryResultsCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.jobId = jobId\n\treturn c\n}\n\n// MaxResults sets the optional parameter \"maxResults\": Maximum number\n// of results to read\nfunc (c *JobsGetQueryResultsCall) MaxResults(maxResults int64) *JobsGetQueryResultsCall {\n\tc.opt_[\"maxResults\"] = maxResults\n\treturn c\n}\n\n// PageToken sets the optional parameter \"pageToken\": Page token,\n// returned by a previous call, to request the next page of results\nfunc (c *JobsGetQueryResultsCall) PageToken(pageToken string) *JobsGetQueryResultsCall {\n\tc.opt_[\"pageToken\"] = pageToken\n\treturn c\n}\n\n// 
StartIndex sets the optional parameter \"startIndex\": Zero-based index\n// of the starting row\nfunc (c *JobsGetQueryResultsCall) StartIndex(startIndex uint64) *JobsGetQueryResultsCall {\n\tc.opt_[\"startIndex\"] = startIndex\n\treturn c\n}\n\n// TimeoutMs sets the optional parameter \"timeoutMs\": How long to wait\n// for the query to complete, in milliseconds, before returning. Default\n// is to return immediately. If the timeout passes before the job\n// completes, the request will fail with a TIMEOUT error\nfunc (c *JobsGetQueryResultsCall) TimeoutMs(timeoutMs int64) *JobsGetQueryResultsCall {\n\tc.opt_[\"timeoutMs\"] = timeoutMs\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *JobsGetQueryResultsCall) Fields(s ...googleapi.Field) *JobsGetQueryResultsCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *JobsGetQueryResultsCall) Do() (*GetQueryResultsResponse, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"maxResults\"]; ok {\n\t\tparams.Set(\"maxResults\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"pageToken\"]; ok {\n\t\tparams.Set(\"pageToken\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"startIndex\"]; ok {\n\t\tparams.Set(\"startIndex\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"timeoutMs\"]; ok {\n\t\tparams.Set(\"timeoutMs\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/queries/{jobId}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"jobId\":     c.jobId,\n\t})\n\treq.Header.Set(\"User-Agent\", 
c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *GetQueryResultsResponse\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Retrieves the results of a query job.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"bigquery.jobs.getQueryResults\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"jobId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"jobId\": {\n\t//       \"description\": \"Job ID of the query job\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"maxResults\": {\n\t//       \"description\": \"Maximum number of results to read\",\n\t//       \"format\": \"uint32\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"integer\"\n\t//     },\n\t//     \"pageToken\": {\n\t//       \"description\": \"Page token, returned by a previous call, to request the next page of results\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the query job\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"startIndex\": {\n\t//       \"description\": \"Zero-based index of the starting row\",\n\t//       \"format\": \"uint64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"timeoutMs\": {\n\t//       \"description\": \"How long to wait for the query to complete, in milliseconds, before returning. Default is to return immediately. 
If the timeout passes before the job completes, the request will fail with a TIMEOUT error\",\n\t//       \"format\": \"uint32\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"integer\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/queries/{jobId}\",\n\t//   \"response\": {\n\t//     \"$ref\": \"GetQueryResultsResponse\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.jobs.insert\":\n\ntype JobsInsertCall struct {\n\ts          *Service\n\tprojectId  string\n\tjob        *Job\n\topt_       map[string]interface{}\n\tmedia_     io.Reader\n\tresumable_ googleapi.SizeReaderAt\n\tmediaType_ string\n\tctx_       context.Context\n\tprotocol_  string\n}\n\n// Insert: Starts a new asynchronous job. Requires the Can View project\n// role.\nfunc (r *JobsService) Insert(projectId string, job *Job) *JobsInsertCall {\n\tc := &JobsInsertCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.job = job\n\treturn c\n}\n\n// Media specifies the media to upload in a single chunk.\n// At most one of Media and ResumableMedia may be set.\nfunc (c *JobsInsertCall) Media(r io.Reader) *JobsInsertCall {\n\tc.media_ = r\n\tc.protocol_ = \"multipart\"\n\treturn c\n}\n\n// ResumableMedia specifies the media to upload in chunks and can be cancelled with ctx.\n// At most one of Media and ResumableMedia may be set.\n// mediaType identifies the MIME media type of the upload, such as \"image/png\".\n// If mediaType is \"\", it will be auto-detected.\nfunc (c *JobsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *JobsInsertCall {\n\tc.ctx_ = ctx\n\tc.resumable_ = io.NewSectionReader(r, 0, size)\n\tc.mediaType_ = mediaType\n\tc.protocol_ = \"resumable\"\n\treturn c\n}\n\n// ProgressUpdater provides a callback function that will be called after every 
chunk.\n// It should be a low-latency function in order to not slow down the upload operation.\n// This should only be called when using ResumableMedia (as opposed to Media).\nfunc (c *JobsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *JobsInsertCall {\n\tc.opt_[\"progressUpdater\"] = pu\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *JobsInsertCall) Fields(s ...googleapi.Field) *JobsInsertCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *JobsInsertCall) Do() (*Job, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.job)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/jobs\")\n\tvar progressUpdater_ googleapi.ProgressUpdater\n\tif v, ok := c.opt_[\"progressUpdater\"]; ok {\n\t\tif pu, ok := v.(googleapi.ProgressUpdater); ok {\n\t\t\tprogressUpdater_ = pu\n\t\t}\n\t}\n\tif c.media_ != nil || c.resumable_ != nil {\n\t\turls = strings.Replace(urls, \"https://www.googleapis.com/\", \"https://www.googleapis.com/upload/\", 1)\n\t\tparams.Set(\"uploadType\", c.protocol_)\n\t}\n\turls += \"?\" + params.Encode()\n\tif c.protocol_ != \"resumable\" {\n\t\tvar cancel func()\n\t\tcancel, _ = googleapi.ConditionallyIncludeMedia(c.media_, &body, &ctype)\n\t\tif cancel != nil {\n\t\t\tdefer cancel()\n\t\t}\n\t}\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t})\n\tif c.protocol_ == \"resumable\" {\n\t\treq.ContentLength = 0\n\t\tif c.mediaType_ == \"\" {\n\t\t\tc.mediaType_ = 
googleapi.DetectMediaType(c.resumable_)\n\t\t}\n\t\treq.Header.Set(\"X-Upload-Content-Type\", c.mediaType_)\n\t\treq.Body = nil\n\t} else {\n\t\treq.Header.Set(\"Content-Type\", ctype)\n\t}\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tif c.protocol_ == \"resumable\" {\n\t\tloc := res.Header.Get(\"Location\")\n\t\trx := &googleapi.ResumableUpload{\n\t\t\tClient:        c.s.client,\n\t\t\tUserAgent:     c.s.userAgent(),\n\t\t\tURI:           loc,\n\t\t\tMedia:         c.resumable_,\n\t\t\tMediaType:     c.mediaType_,\n\t\t\tContentLength: c.resumable_.Size(),\n\t\t\tCallback:      progressUpdater_,\n\t\t}\n\t\tres, err = rx.Upload(c.ctx_)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer res.Body.Close()\n\t}\n\tvar ret *Job\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Starts a new asynchronous job. 
Requires the Can View project role.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"bigquery.jobs.insert\",\n\t//   \"mediaUpload\": {\n\t//     \"accept\": [\n\t//       \"*/*\"\n\t//     ],\n\t//     \"protocols\": {\n\t//       \"resumable\": {\n\t//         \"multipart\": true,\n\t//         \"path\": \"/resumable/upload/bigquery/v2/projects/{projectId}/jobs\"\n\t//       },\n\t//       \"simple\": {\n\t//         \"multipart\": true,\n\t//         \"path\": \"/upload/bigquery/v2/projects/{projectId}/jobs\"\n\t//       }\n\t//     }\n\t//   },\n\t//   \"parameterOrder\": [\n\t//     \"projectId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the project that will be billed for the job\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/jobs\",\n\t//   \"request\": {\n\t//     \"$ref\": \"Job\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Job\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_only\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_write\"\n\t//   ],\n\t//   \"supportsMediaUpload\": true\n\t// }\n\n}\n\n// method id \"bigquery.jobs.list\":\n\ntype JobsListCall struct {\n\ts         *Service\n\tprojectId string\n\topt_      map[string]interface{}\n}\n\n// List: Lists all jobs that you started in the specified project. The\n// job list returns in reverse chronological order of when the jobs were\n// created, starting with the most recent job created. 
Requires the Can\n// View project role, or the Is Owner project role if you set the\n// allUsers property.\nfunc (r *JobsService) List(projectId string) *JobsListCall {\n\tc := &JobsListCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\treturn c\n}\n\n// AllUsers sets the optional parameter \"allUsers\": Whether to display\n// jobs owned by all users in the project. Default false\nfunc (c *JobsListCall) AllUsers(allUsers bool) *JobsListCall {\n\tc.opt_[\"allUsers\"] = allUsers\n\treturn c\n}\n\n// MaxResults sets the optional parameter \"maxResults\": Maximum number\n// of results to return\nfunc (c *JobsListCall) MaxResults(maxResults int64) *JobsListCall {\n\tc.opt_[\"maxResults\"] = maxResults\n\treturn c\n}\n\n// PageToken sets the optional parameter \"pageToken\": Page token,\n// returned by a previous call, to request the next page of results\nfunc (c *JobsListCall) PageToken(pageToken string) *JobsListCall {\n\tc.opt_[\"pageToken\"] = pageToken\n\treturn c\n}\n\n// Projection sets the optional parameter \"projection\": Restrict\n// information returned to a set of selected fields\n//\n// Possible values:\n//   \"full\" - Includes all job data\n//   \"minimal\" - Does not include the job configuration\nfunc (c *JobsListCall) Projection(projection string) *JobsListCall {\n\tc.opt_[\"projection\"] = projection\n\treturn c\n}\n\n// StateFilter sets the optional parameter \"stateFilter\": Filter for job\n// state\n//\n// Possible values:\n//   \"done\" - Finished jobs\n//   \"pending\" - Pending jobs\n//   \"running\" - Running jobs\nfunc (c *JobsListCall) StateFilter(stateFilter string) *JobsListCall {\n\tc.opt_[\"stateFilter\"] = stateFilter\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *JobsListCall) Fields(s ...googleapi.Field) *JobsListCall {\n\tc.opt_[\"fields\"] = 
googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *JobsListCall) Do() (*JobList, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"allUsers\"]; ok {\n\t\tparams.Set(\"allUsers\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"maxResults\"]; ok {\n\t\tparams.Set(\"maxResults\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"pageToken\"]; ok {\n\t\tparams.Set(\"pageToken\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"projection\"]; ok {\n\t\tparams.Set(\"projection\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"stateFilter\"]; ok {\n\t\tparams.Set(\"stateFilter\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/jobs\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *JobList\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Lists all jobs that you started in the specified project. The job list returns in reverse chronological order of when the jobs were created, starting with the most recent job created. 
Requires the Can View project role, or the Is Owner project role if you set the allUsers property.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"bigquery.jobs.list\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"allUsers\": {\n\t//       \"description\": \"Whether to display jobs owned by all users in the project. Default false\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"boolean\"\n\t//     },\n\t//     \"maxResults\": {\n\t//       \"description\": \"Maximum number of results to return\",\n\t//       \"format\": \"uint32\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"integer\"\n\t//     },\n\t//     \"pageToken\": {\n\t//       \"description\": \"Page token, returned by a previous call, to request the next page of results\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the jobs to list\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projection\": {\n\t//       \"description\": \"Restrict information returned to a set of selected fields\",\n\t//       \"enum\": [\n\t//         \"full\",\n\t//         \"minimal\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Includes all job data\",\n\t//         \"Does not include the job configuration\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"stateFilter\": {\n\t//       \"description\": \"Filter for job state\",\n\t//       \"enum\": [\n\t//         \"done\",\n\t//         \"pending\",\n\t//         \"running\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Finished jobs\",\n\t//         \"Pending jobs\",\n\t//         \"Running jobs\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"repeated\": true,\n\t//       
\"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/jobs\",\n\t//   \"response\": {\n\t//     \"$ref\": \"JobList\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.jobs.query\":\n\ntype JobsQueryCall struct {\n\ts            *Service\n\tprojectId    string\n\tqueryrequest *QueryRequest\n\topt_         map[string]interface{}\n}\n\n// Query: Runs a BigQuery SQL query synchronously and returns query\n// results if the query completes within a specified timeout.\nfunc (r *JobsService) Query(projectId string, queryrequest *QueryRequest) *JobsQueryCall {\n\tc := &JobsQueryCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.queryrequest = queryrequest\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *JobsQueryCall) Fields(s ...googleapi.Field) *JobsQueryCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *JobsQueryCall) Do() (*QueryResponse, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.queryrequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/queries\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *QueryResponse\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"bigquery.jobs.query\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the project billed for the query\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/queries\",\n\t//   \"request\": {\n\t//     \"$ref\": \"QueryRequest\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"QueryResponse\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.projects.list\":\n\ntype ProjectsListCall struct {\n\ts    *Service\n\topt_ map[string]interface{}\n}\n\n// List: Lists all projects to which you have been granted any project\n// role.\nfunc (r *ProjectsService) List() *ProjectsListCall {\n\tc := &ProjectsListCall{s: r.s, opt_: make(map[string]interface{})}\n\treturn c\n}\n\n// MaxResults sets the optional parameter \"maxResults\": Maximum number\n// of results to return\nfunc (c *ProjectsListCall) MaxResults(maxResults int64) *ProjectsListCall {\n\tc.opt_[\"maxResults\"] = maxResults\n\treturn c\n}\n\n// PageToken sets the optional parameter \"pageToken\": Page token,\n// returned by a previous call, to request the next page of results\nfunc (c *ProjectsListCall) PageToken(pageToken string) *ProjectsListCall {\n\tc.opt_[\"pageToken\"] = 
pageToken\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsListCall) Fields(s ...googleapi.Field) *ProjectsListCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsListCall) Do() (*ProjectList, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"maxResults\"]; ok {\n\t\tparams.Set(\"maxResults\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"pageToken\"]; ok {\n\t\tparams.Set(\"pageToken\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.SetOpaque(req.URL)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ProjectList\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Lists all projects to which you have been granted any project role.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"bigquery.projects.list\",\n\t//   \"parameters\": {\n\t//     \"maxResults\": {\n\t//       \"description\": \"Maximum number of results to return\",\n\t//       \"format\": \"uint32\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"integer\"\n\t//     },\n\t//     \"pageToken\": {\n\t//       \"description\": \"Page token, returned by a previous call, to request the next page of results\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     }\n\t//   
},\n\t//   \"path\": \"projects\",\n\t//   \"response\": {\n\t//     \"$ref\": \"ProjectList\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.tabledata.insertAll\":\n\ntype TabledataInsertAllCall struct {\n\ts                         *Service\n\tprojectId                 string\n\tdatasetId                 string\n\ttableId                   string\n\ttabledatainsertallrequest *TableDataInsertAllRequest\n\topt_                      map[string]interface{}\n}\n\n// InsertAll: Streams data into BigQuery one record at a time without\n// needing to run a load job. Requires the WRITER dataset role.\nfunc (r *TabledataService) InsertAll(projectId string, datasetId string, tableId string, tabledatainsertallrequest *TableDataInsertAllRequest) *TabledataInsertAllCall {\n\tc := &TabledataInsertAllCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.datasetId = datasetId\n\tc.tableId = tableId\n\tc.tabledatainsertallrequest = tabledatainsertallrequest\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *TabledataInsertAllCall) Fields(s ...googleapi.Field) *TabledataInsertAllCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *TabledataInsertAllCall) Do() (*TableDataInsertAllResponse, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.tabledatainsertallrequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, 
\"projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"datasetId\": c.datasetId,\n\t\t\"tableId\":   c.tableId,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *TableDataInsertAllResponse\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Streams data into BigQuery one record at a time without needing to run a load job. Requires the WRITER dataset role.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"bigquery.tabledata.insertAll\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"datasetId\",\n\t//     \"tableId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"datasetId\": {\n\t//       \"description\": \"Dataset ID of the destination table.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the destination table.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"tableId\": {\n\t//       \"description\": \"Table ID of the destination table.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll\",\n\t//   \"request\": {\n\t//     \"$ref\": \"TableDataInsertAllRequest\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": 
\"TableDataInsertAllResponse\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/bigquery.insertdata\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.tabledata.list\":\n\ntype TabledataListCall struct {\n\ts         *Service\n\tprojectId string\n\tdatasetId string\n\ttableId   string\n\topt_      map[string]interface{}\n}\n\n// List: Retrieves table data from a specified set of rows. Requires the\n// READER dataset role.\nfunc (r *TabledataService) List(projectId string, datasetId string, tableId string) *TabledataListCall {\n\tc := &TabledataListCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.datasetId = datasetId\n\tc.tableId = tableId\n\treturn c\n}\n\n// MaxResults sets the optional parameter \"maxResults\": Maximum number\n// of results to return\nfunc (c *TabledataListCall) MaxResults(maxResults int64) *TabledataListCall {\n\tc.opt_[\"maxResults\"] = maxResults\n\treturn c\n}\n\n// PageToken sets the optional parameter \"pageToken\": Page token,\n// returned by a previous call, identifying the result set\nfunc (c *TabledataListCall) PageToken(pageToken string) *TabledataListCall {\n\tc.opt_[\"pageToken\"] = pageToken\n\treturn c\n}\n\n// StartIndex sets the optional parameter \"startIndex\": Zero-based index\n// of the starting row to read\nfunc (c *TabledataListCall) StartIndex(startIndex uint64) *TabledataListCall {\n\tc.opt_[\"startIndex\"] = startIndex\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *TabledataListCall) Fields(s ...googleapi.Field) *TabledataListCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *TabledataListCall) Do() (*TableDataList, error) {\n\tvar body io.Reader = nil\n\tparams := 
make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"maxResults\"]; ok {\n\t\tparams.Set(\"maxResults\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"pageToken\"]; ok {\n\t\tparams.Set(\"pageToken\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"startIndex\"]; ok {\n\t\tparams.Set(\"startIndex\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"datasetId\": c.datasetId,\n\t\t\"tableId\":   c.tableId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *TableDataList\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Retrieves table data from a specified set of rows. 
Requires the READER dataset role.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"bigquery.tabledata.list\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"datasetId\",\n\t//     \"tableId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"datasetId\": {\n\t//       \"description\": \"Dataset ID of the table to read\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"maxResults\": {\n\t//       \"description\": \"Maximum number of results to return\",\n\t//       \"format\": \"uint32\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"integer\"\n\t//     },\n\t//     \"pageToken\": {\n\t//       \"description\": \"Page token, returned by a previous call, identifying the result set\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the table to read\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"startIndex\": {\n\t//       \"description\": \"Zero-based index of the starting row to read\",\n\t//       \"format\": \"uint64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"tableId\": {\n\t//       \"description\": \"Table ID of the table to read\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data\",\n\t//   \"response\": {\n\t//     \"$ref\": \"TableDataList\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.tables.delete\":\n\ntype TablesDeleteCall struct {\n\ts         *Service\n\tprojectId string\n\tdatasetId 
string\n\ttableId   string\n\topt_      map[string]interface{}\n}\n\n// Delete: Deletes the table specified by tableId from the dataset. If\n// the table contains data, all the data will be deleted.\nfunc (r *TablesService) Delete(projectId string, datasetId string, tableId string) *TablesDeleteCall {\n\tc := &TablesDeleteCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.datasetId = datasetId\n\tc.tableId = tableId\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *TablesDeleteCall) Fields(s ...googleapi.Field) *TablesDeleteCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *TablesDeleteCall) Do() error {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"DELETE\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"datasetId\": c.datasetId,\n\t\t\"tableId\":   c.tableId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\t// {\n\t//   \"description\": \"Deletes the table specified by tableId from the dataset. 
If the table contains data, all the data will be deleted.\",\n\t//   \"httpMethod\": \"DELETE\",\n\t//   \"id\": \"bigquery.tables.delete\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"datasetId\",\n\t//     \"tableId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"datasetId\": {\n\t//       \"description\": \"Dataset ID of the table to delete\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the table to delete\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"tableId\": {\n\t//       \"description\": \"Table ID of the table to delete\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}\",\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.tables.get\":\n\ntype TablesGetCall struct {\n\ts         *Service\n\tprojectId string\n\tdatasetId string\n\ttableId   string\n\topt_      map[string]interface{}\n}\n\n// Get: Gets the specified table resource by table ID. 
This method does\n// not return the data in the table, it only returns the table resource,\n// which describes the structure of this table.\nfunc (r *TablesService) Get(projectId string, datasetId string, tableId string) *TablesGetCall {\n\tc := &TablesGetCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.datasetId = datasetId\n\tc.tableId = tableId\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *TablesGetCall) Fields(s ...googleapi.Field) *TablesGetCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *TablesGetCall) Do() (*Table, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"datasetId\": c.datasetId,\n\t\t\"tableId\":   c.tableId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Table\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Gets the specified table resource by table ID. 
This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"bigquery.tables.get\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"datasetId\",\n\t//     \"tableId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"datasetId\": {\n\t//       \"description\": \"Dataset ID of the requested table\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the requested table\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"tableId\": {\n\t//       \"description\": \"Table ID of the requested table\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}\",\n\t//   \"response\": {\n\t//     \"$ref\": \"Table\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.tables.insert\":\n\ntype TablesInsertCall struct {\n\ts         *Service\n\tprojectId string\n\tdatasetId string\n\ttable     *Table\n\topt_      map[string]interface{}\n}\n\n// Insert: Creates a new, empty table in the dataset.\nfunc (r *TablesService) Insert(projectId string, datasetId string, table *Table) *TablesInsertCall {\n\tc := &TablesInsertCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.datasetId = datasetId\n\tc.table = table\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *TablesInsertCall) Fields(s 
...googleapi.Field) *TablesInsertCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *TablesInsertCall) Do() (*Table, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.table)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/datasets/{datasetId}/tables\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"datasetId\": c.datasetId,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Table\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Creates a new, empty table in the dataset.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"bigquery.tables.insert\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"datasetId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"datasetId\": {\n\t//       \"description\": \"Dataset ID of the new table\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the new table\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/datasets/{datasetId}/tables\",\n\t//   
\"request\": {\n\t//     \"$ref\": \"Table\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Table\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.tables.list\":\n\ntype TablesListCall struct {\n\ts         *Service\n\tprojectId string\n\tdatasetId string\n\topt_      map[string]interface{}\n}\n\n// List: Lists all tables in the specified dataset. Requires the READER\n// dataset role.\nfunc (r *TablesService) List(projectId string, datasetId string) *TablesListCall {\n\tc := &TablesListCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.datasetId = datasetId\n\treturn c\n}\n\n// MaxResults sets the optional parameter \"maxResults\": Maximum number\n// of results to return\nfunc (c *TablesListCall) MaxResults(maxResults int64) *TablesListCall {\n\tc.opt_[\"maxResults\"] = maxResults\n\treturn c\n}\n\n// PageToken sets the optional parameter \"pageToken\": Page token,\n// returned by a previous call, to request the next page of results\nfunc (c *TablesListCall) PageToken(pageToken string) *TablesListCall {\n\tc.opt_[\"pageToken\"] = pageToken\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *TablesListCall) Fields(s ...googleapi.Field) *TablesListCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *TablesListCall) Do() (*TableList, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"maxResults\"]; ok {\n\t\tparams.Set(\"maxResults\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"pageToken\"]; ok {\n\t\tparams.Set(\"pageToken\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", 
fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/datasets/{datasetId}/tables\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"datasetId\": c.datasetId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *TableList\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Lists all tables in the specified dataset. Requires the READER dataset role.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"bigquery.tables.list\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"datasetId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"datasetId\": {\n\t//       \"description\": \"Dataset ID of the tables to list\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"maxResults\": {\n\t//       \"description\": \"Maximum number of results to return\",\n\t//       \"format\": \"uint32\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"integer\"\n\t//     },\n\t//     \"pageToken\": {\n\t//       \"description\": \"Page token, returned by a previous call, to request the next page of results\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the tables to list\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/datasets/{datasetId}/tables\",\n\t//   \"response\": {\n\t//     
\"$ref\": \"TableList\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.tables.patch\":\n\ntype TablesPatchCall struct {\n\ts         *Service\n\tprojectId string\n\tdatasetId string\n\ttableId   string\n\ttable     *Table\n\topt_      map[string]interface{}\n}\n\n// Patch: Updates information in an existing table. The update method\n// replaces the entire table resource, whereas the patch method only\n// replaces fields that are provided in the submitted table resource.\n// This method supports patch semantics.\nfunc (r *TablesService) Patch(projectId string, datasetId string, tableId string, table *Table) *TablesPatchCall {\n\tc := &TablesPatchCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.datasetId = datasetId\n\tc.tableId = tableId\n\tc.table = table\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *TablesPatchCall) Fields(s ...googleapi.Field) *TablesPatchCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *TablesPatchCall) Do() (*Table, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.table)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"PATCH\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"datasetId\": c.datasetId,\n\t\t\"tableId\":   
c.tableId,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Table\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource. This method supports patch semantics.\",\n\t//   \"httpMethod\": \"PATCH\",\n\t//   \"id\": \"bigquery.tables.patch\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"datasetId\",\n\t//     \"tableId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"datasetId\": {\n\t//       \"description\": \"Dataset ID of the table to update\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the table to update\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"tableId\": {\n\t//       \"description\": \"Table ID of the table to update\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}\",\n\t//   \"request\": {\n\t//     \"$ref\": \"Table\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Table\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"bigquery.tables.update\":\n\ntype TablesUpdateCall 
struct {\n\ts         *Service\n\tprojectId string\n\tdatasetId string\n\ttableId   string\n\ttable     *Table\n\topt_      map[string]interface{}\n}\n\n// Update: Updates information in an existing table. The update method\n// replaces the entire table resource, whereas the patch method only\n// replaces fields that are provided in the submitted table resource.\nfunc (r *TablesService) Update(projectId string, datasetId string, tableId string, table *Table) *TablesUpdateCall {\n\tc := &TablesUpdateCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.datasetId = datasetId\n\tc.tableId = tableId\n\tc.table = table\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *TablesUpdateCall) Fields(s ...googleapi.Field) *TablesUpdateCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *TablesUpdateCall) Do() (*Table, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.table)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"PUT\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"datasetId\": c.datasetId,\n\t\t\"tableId\":   c.tableId,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret 
*Table\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource.\",\n\t//   \"httpMethod\": \"PUT\",\n\t//   \"id\": \"bigquery.tables.update\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"datasetId\",\n\t//     \"tableId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"datasetId\": {\n\t//       \"description\": \"Dataset ID of the table to update\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"Project ID of the table to update\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"tableId\": {\n\t//       \"description\": \"Table ID of the table to update\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"projects/{projectId}/datasets/{datasetId}/tables/{tableId}\",\n\t//   \"request\": {\n\t//     \"$ref\": \"Table\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Table\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/bigquery\",\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n"
  },
  {
    "path": "vendor/google.golang.org/api/container/v1beta1/container-api.json",
    "content": "{\n \"kind\": \"discovery#restDescription\",\n \"etag\": \"\\\"ye6orv2F-1npMW3u9suM3a7C5Bo/ReRXGEgk9TcyLgT1qFhzuzuEb7E\\\"\",\n \"discoveryVersion\": \"v1\",\n \"id\": \"container:v1beta1\",\n \"name\": \"container\",\n \"version\": \"v1beta1\",\n \"revision\": \"20150504\",\n \"title\": \"Google Container Engine API\",\n \"description\": \"The Google Container Engine API is used for building and managing container based applications, powered by the open source Kubernetes technology.\",\n \"ownerDomain\": \"google.com\",\n \"ownerName\": \"Google\",\n \"icons\": {\n  \"x16\": \"http://www.google.com/images/icons/product/search-16.gif\",\n  \"x32\": \"http://www.google.com/images/icons/product/search-32.gif\"\n },\n \"documentationLink\": \"https://cloud.google.com/container-engine/docs/v1beta1/\",\n \"protocol\": \"rest\",\n \"baseUrl\": \"https://www.googleapis.com/container/v1beta1/projects/\",\n \"basePath\": \"/container/v1beta1/projects/\",\n \"rootUrl\": \"https://www.googleapis.com/\",\n \"servicePath\": \"container/v1beta1/projects/\",\n \"batchPath\": \"batch\",\n \"parameters\": {\n  \"alt\": {\n   \"type\": \"string\",\n   \"description\": \"Data format for the response.\",\n   \"default\": \"json\",\n   \"enum\": [\n    \"json\"\n   ],\n   \"enumDescriptions\": [\n    \"Responses with Content-Type of application/json\"\n   ],\n   \"location\": \"query\"\n  },\n  \"fields\": {\n   \"type\": \"string\",\n   \"description\": \"Selector specifying which fields to include in a partial response.\",\n   \"location\": \"query\"\n  },\n  \"key\": {\n   \"type\": \"string\",\n   \"description\": \"API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.\",\n   \"location\": \"query\"\n  },\n  \"oauth_token\": {\n   \"type\": \"string\",\n   \"description\": \"OAuth 2.0 token for the current user.\",\n   \"location\": \"query\"\n  },\n  \"prettyPrint\": {\n   \"type\": \"boolean\",\n   \"description\": \"Returns response with indentations and line breaks.\",\n   \"default\": \"true\",\n   \"location\": \"query\"\n  },\n  \"quotaUser\": {\n   \"type\": \"string\",\n   \"description\": \"Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.\",\n   \"location\": \"query\"\n  },\n  \"userIp\": {\n   \"type\": \"string\",\n   \"description\": \"IP address of the site where the request originates. Use this if you want to enforce per-user limits.\",\n   \"location\": \"query\"\n  }\n },\n \"auth\": {\n  \"oauth2\": {\n   \"scopes\": {\n    \"https://www.googleapis.com/auth/cloud-platform\": {\n     \"description\": \"View and manage your data across Google Cloud Platform services\"\n    }\n   }\n  }\n },\n \"schemas\": {\n  \"Cluster\": {\n   \"id\": \"Cluster\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"clusterApiVersion\": {\n     \"type\": \"string\",\n     \"description\": \"The API version of the Kubernetes master and kubelets running in this cluster. Leave blank to pick up the latest stable release, or specify a version of the form \\\"x.y.z\\\". The Google Container Engine release notes lists the currently supported versions. If an incorrect version is specified, the server returns an error listing the currently supported versions.\"\n    },\n    \"containerIpv4Cidr\": {\n     \"type\": \"string\",\n     \"description\": \"The IP address range of the container pods in this cluster, in  CIDR notation (e.g. 10.96.0.0/14). 
Leave blank to have one automatically chosen or specify a /14 block in 10.0.0.0/8 or 172.16.0.0/12.\"\n    },\n    \"creationTimestamp\": {\n     \"type\": \"string\",\n     \"description\": \"[Output only] The time the cluster was created, in RFC3339 text format.\"\n    },\n    \"description\": {\n     \"type\": \"string\",\n     \"description\": \"An optional description of this cluster.\"\n    },\n    \"enableCloudLogging\": {\n     \"type\": \"boolean\",\n     \"description\": \"Whether logs from the cluster should be made available via the Google Cloud Logging service. This includes both logs from your applications running in the cluster as well as logs from the Kubernetes components themselves.\"\n    },\n    \"endpoint\": {\n     \"type\": \"string\",\n     \"description\": \"[Output only] The IP address of this cluster's Kubernetes master. The endpoint can be accessed from the internet at https://username:password@endpoint/.\\n\\nSee the masterAuth property of this resource for username and password information.\"\n    },\n    \"masterAuth\": {\n     \"$ref\": \"MasterAuth\",\n     \"description\": \"The authentication information for accessing the master.\"\n    },\n    \"name\": {\n     \"type\": \"string\",\n     \"description\": \"The name of this cluster. The name must be unique within this project and zone, and can be up to 40 characters with the following restrictions:  \\n- Lowercase letters, numbers, and hyphens only.\\n- Must start with a letter.\\n- Must end with a number or a letter.\"\n    },\n    \"network\": {\n     \"type\": \"string\",\n     \"description\": \"The name of the Google Compute Engine network to which the cluster is connected.\"\n    },\n    \"nodeConfig\": {\n     \"$ref\": \"NodeConfig\",\n     \"description\": \"The machine type and image to use for all nodes in this cluster. 
See the descriptions of the child properties of nodeConfig.\"\n    },\n    \"nodeRoutingPrefixSize\": {\n     \"type\": \"integer\",\n     \"description\": \"[Output only] The size of the address space on each node for hosting containers.\",\n     \"format\": \"int32\"\n    },\n    \"numNodes\": {\n     \"type\": \"integer\",\n     \"description\": \"The number of nodes to create in this cluster. You must ensure that your Compute Engine resource quota is sufficient for this number of instances plus one (to include the master). You must also have available firewall and routes quota.\",\n     \"format\": \"int32\"\n    },\n    \"selfLink\": {\n     \"type\": \"string\",\n     \"description\": \"[Output only] Server-defined URL for the resource.\"\n    },\n    \"servicesIpv4Cidr\": {\n     \"type\": \"string\",\n     \"description\": \"[Output only] The IP address range of the Kubernetes services in this cluster, in  CIDR notation (e.g. 1.2.3.4/29). Service addresses are typically put in the last /16 from the container CIDR.\"\n    },\n    \"status\": {\n     \"type\": \"string\",\n     \"description\": \"[Output only] The current status of this cluster.\",\n     \"enum\": [\n      \"error\",\n      \"provisioning\",\n      \"running\",\n      \"stopping\"\n     ],\n     \"enumDescriptions\": [\n      \"\",\n      \"\",\n      \"\",\n      \"\"\n     ]\n    },\n    \"statusMessage\": {\n     \"type\": \"string\",\n     \"description\": \"[Output only] Additional information about the current status of this cluster, if available.\"\n    },\n    \"zone\": {\n     \"type\": \"string\",\n     \"description\": \"[Output only] The name of the Google Compute Engine zone in which the cluster resides.\"\n    }\n   }\n  },\n  \"CreateClusterRequest\": {\n   \"id\": \"CreateClusterRequest\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"cluster\": {\n     \"$ref\": \"Cluster\",\n     \"description\": \"A cluster resource.\"\n    }\n   }\n  },\n  
\"ListAggregatedClustersResponse\": {\n   \"id\": \"ListAggregatedClustersResponse\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"clusters\": {\n     \"type\": \"array\",\n     \"description\": \"A list of clusters in the project, across all zones.\",\n     \"items\": {\n      \"$ref\": \"Cluster\"\n     }\n    }\n   }\n  },\n  \"ListAggregatedOperationsResponse\": {\n   \"id\": \"ListAggregatedOperationsResponse\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"operations\": {\n     \"type\": \"array\",\n     \"description\": \"A list of operations in the project, across all zones.\",\n     \"items\": {\n      \"$ref\": \"Operation\"\n     }\n    }\n   }\n  },\n  \"ListClustersResponse\": {\n   \"id\": \"ListClustersResponse\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"clusters\": {\n     \"type\": \"array\",\n     \"description\": \"A list of clusters in the project in the specified zone.\",\n     \"items\": {\n      \"$ref\": \"Cluster\"\n     }\n    }\n   }\n  },\n  \"ListOperationsResponse\": {\n   \"id\": \"ListOperationsResponse\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"operations\": {\n     \"type\": \"array\",\n     \"description\": \"A list of operations in the project in the specified zone.\",\n     \"items\": {\n      \"$ref\": \"Operation\"\n     }\n    }\n   }\n  },\n  \"MasterAuth\": {\n   \"id\": \"MasterAuth\",\n   \"type\": \"object\",\n   \"description\": \"The authentication information for accessing the master. Authentication is either done using HTTP basic authentication or using a bearer token.\",\n   \"properties\": {\n    \"bearerToken\": {\n     \"type\": \"string\",\n     \"description\": \"The token used to authenticate API requests to the master. The token is to be included in an HTTP Authorization Header in all requests to the master endpoint. 
The format of the header is: \\\"Authorization: Bearer \\\".\"\n    },\n    \"password\": {\n     \"type\": \"string\",\n     \"description\": \"The password to use for HTTP basic authentication when accessing the Kubernetes master endpoint. Because the master endpoint is open to the internet, you should create a strong password.\"\n    },\n    \"user\": {\n     \"type\": \"string\",\n     \"description\": \"The username to use for HTTP basic authentication when accessing the Kubernetes master endpoint.\"\n    }\n   }\n  },\n  \"NodeConfig\": {\n   \"id\": \"NodeConfig\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"machineType\": {\n     \"type\": \"string\",\n     \"description\": \"The name of a Google Compute Engine machine type (e.g. n1-standard-1).\\n\\nIf unspecified, the default machine type is n1-standard-1.\"\n    },\n    \"serviceAccounts\": {\n     \"type\": \"array\",\n     \"description\": \"The optional list of ServiceAccounts, each with their specified scopes, to be made available on all of the node VMs. In addition to the service accounts and scopes specified, the \\\"default\\\" account will always be created with the following scopes to ensure the correct functioning of the cluster:  \\n- https://www.googleapis.com/auth/compute,\\n- https://www.googleapis.com/auth/devstorage.read_only\",\n     \"items\": {\n      \"$ref\": \"ServiceAccount\"\n     }\n    },\n    \"sourceImage\": {\n     \"type\": \"string\",\n     \"description\": \"The fully-specified name of a Google Compute Engine image. For example: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/backports-debian-7-wheezy-vYYYYMMDD (where YYYMMDD is the version date).\\n\\nIf specifying an image, you are responsible for ensuring its compatibility with the Debian 7 backports image. 
We recommend leaving this field blank to accept the default backports-debian-7-wheezy value.\"\n    }\n   }\n  },\n  \"Operation\": {\n   \"id\": \"Operation\",\n   \"type\": \"object\",\n   \"description\": \"Defines the operation resource. All fields are output only.\",\n   \"properties\": {\n    \"errorMessage\": {\n     \"type\": \"string\",\n     \"description\": \"If an error has occurred, a textual description of the error.\"\n    },\n    \"name\": {\n     \"type\": \"string\",\n     \"description\": \"The server-assigned ID for the operation.\"\n    },\n    \"operationType\": {\n     \"type\": \"string\",\n     \"description\": \"The operation type.\",\n     \"enum\": [\n      \"createCluster\",\n      \"deleteCluster\"\n     ],\n     \"enumDescriptions\": [\n      \"\",\n      \"\"\n     ]\n    },\n    \"selfLink\": {\n     \"type\": \"string\",\n     \"description\": \"Server-defined URL for the resource.\"\n    },\n    \"status\": {\n     \"type\": \"string\",\n     \"description\": \"The current status of the operation.\",\n     \"enum\": [\n      \"done\",\n      \"pending\",\n      \"running\"\n     ],\n     \"enumDescriptions\": [\n      \"\",\n      \"\",\n      \"\"\n     ]\n    },\n    \"target\": {\n     \"type\": \"string\",\n     \"description\": \"[Optional] The URL of the cluster resource that this operation is associated with.\"\n    },\n    \"targetLink\": {\n     \"type\": \"string\",\n     \"description\": \"Server-defined URL for the target of the operation.\"\n    },\n    \"zone\": {\n     \"type\": \"string\",\n     \"description\": \"The name of the Google Compute Engine zone in which the operation is taking place.\"\n    }\n   }\n  },\n  \"ServiceAccount\": {\n   \"id\": \"ServiceAccount\",\n   \"type\": \"object\",\n   \"description\": \"A Compute Engine service account.\",\n   \"properties\": {\n    \"email\": {\n     \"type\": \"string\",\n     \"description\": \"Email address of the service account.\"\n    },\n    \"scopes\": {\n 
    \"type\": \"array\",\n     \"description\": \"The list of scopes to be made available for this service account.\",\n     \"items\": {\n      \"type\": \"string\"\n     }\n    }\n   }\n  }\n },\n \"resources\": {\n  \"projects\": {\n   \"resources\": {\n    \"clusters\": {\n     \"methods\": {\n      \"list\": {\n       \"id\": \"container.projects.clusters.list\",\n       \"path\": \"{projectId}/clusters\",\n       \"httpMethod\": \"GET\",\n       \"description\": \"Lists all clusters owned by a project across all zones.\",\n       \"parameters\": {\n        \"projectId\": {\n         \"type\": \"string\",\n         \"description\": \"The Google Developers Console project ID or  project number.\",\n         \"required\": true,\n         \"location\": \"path\"\n        }\n       },\n       \"parameterOrder\": [\n        \"projectId\"\n       ],\n       \"response\": {\n        \"$ref\": \"ListAggregatedClustersResponse\"\n       },\n       \"scopes\": [\n        \"https://www.googleapis.com/auth/cloud-platform\"\n       ]\n      }\n     }\n    },\n    \"operations\": {\n     \"methods\": {\n      \"list\": {\n       \"id\": \"container.projects.operations.list\",\n       \"path\": \"{projectId}/operations\",\n       \"httpMethod\": \"GET\",\n       \"description\": \"Lists all operations in a project, across all zones.\",\n       \"parameters\": {\n        \"projectId\": {\n         \"type\": \"string\",\n         \"description\": \"The Google Developers Console project ID or  project number.\",\n         \"required\": true,\n         \"location\": \"path\"\n        }\n       },\n       \"parameterOrder\": [\n        \"projectId\"\n       ],\n       \"response\": {\n        \"$ref\": \"ListAggregatedOperationsResponse\"\n       },\n       \"scopes\": [\n        \"https://www.googleapis.com/auth/cloud-platform\"\n       ]\n      }\n     }\n    },\n    \"zones\": {\n     \"resources\": {\n      \"clusters\": {\n       \"methods\": {\n        \"create\": {\n        
 \"id\": \"container.projects.zones.clusters.create\",\n         \"path\": \"{projectId}/zones/{zoneId}/clusters\",\n         \"httpMethod\": \"POST\",\n         \"description\": \"Creates a cluster, consisting of the specified number and type of Google Compute Engine instances, plus a Kubernetes master instance.\\n\\nThe cluster is created in the project's default network.\\n\\nA firewall is added that allows traffic into port 443 on the master, which enables HTTPS. A firewall and a route is added for each node to allow the containers on that node to communicate with all other instances in the cluster.\\n\\nFinally, an entry is added to the project's global metadata indicating which CIDR range is being used by the cluster.\",\n         \"parameters\": {\n          \"projectId\": {\n           \"type\": \"string\",\n           \"description\": \"The Google Developers Console project ID or  project number.\",\n           \"required\": true,\n           \"location\": \"path\"\n          },\n          \"zoneId\": {\n           \"type\": \"string\",\n           \"description\": \"The name of the Google Compute Engine zone in which the cluster resides.\",\n           \"required\": true,\n           \"location\": \"path\"\n          }\n         },\n         \"parameterOrder\": [\n          \"projectId\",\n          \"zoneId\"\n         ],\n         \"request\": {\n          \"$ref\": \"CreateClusterRequest\"\n         },\n         \"response\": {\n          \"$ref\": \"Operation\"\n         },\n         \"scopes\": [\n          \"https://www.googleapis.com/auth/cloud-platform\"\n         ]\n        },\n        \"delete\": {\n         \"id\": \"container.projects.zones.clusters.delete\",\n         \"path\": \"{projectId}/zones/{zoneId}/clusters/{clusterId}\",\n         \"httpMethod\": \"DELETE\",\n         \"description\": \"Deletes the cluster, including the Kubernetes master and all worker nodes.\\n\\nFirewalls and routes that were configured at cluster creation are 
also deleted.\",\n         \"parameters\": {\n          \"clusterId\": {\n           \"type\": \"string\",\n           \"description\": \"The name of the cluster to delete.\",\n           \"required\": true,\n           \"location\": \"path\"\n          },\n          \"projectId\": {\n           \"type\": \"string\",\n           \"description\": \"The Google Developers Console project ID or  project number.\",\n           \"required\": true,\n           \"location\": \"path\"\n          },\n          \"zoneId\": {\n           \"type\": \"string\",\n           \"description\": \"The name of the Google Compute Engine zone in which the cluster resides.\",\n           \"required\": true,\n           \"location\": \"path\"\n          }\n         },\n         \"parameterOrder\": [\n          \"projectId\",\n          \"zoneId\",\n          \"clusterId\"\n         ],\n         \"response\": {\n          \"$ref\": \"Operation\"\n         },\n         \"scopes\": [\n          \"https://www.googleapis.com/auth/cloud-platform\"\n         ]\n        },\n        \"get\": {\n         \"id\": \"container.projects.zones.clusters.get\",\n         \"path\": \"{projectId}/zones/{zoneId}/clusters/{clusterId}\",\n         \"httpMethod\": \"GET\",\n         \"description\": \"Gets a specific cluster.\",\n         \"parameters\": {\n          \"clusterId\": {\n           \"type\": \"string\",\n           \"description\": \"The name of the cluster to retrieve.\",\n           \"required\": true,\n           \"location\": \"path\"\n          },\n          \"projectId\": {\n           \"type\": \"string\",\n           \"description\": \"The Google Developers Console project ID or  project number.\",\n           \"required\": true,\n           \"location\": \"path\"\n          },\n          \"zoneId\": {\n           \"type\": \"string\",\n           \"description\": \"The name of the Google Compute Engine zone in which the cluster resides.\",\n           \"required\": true,\n           
\"location\": \"path\"\n          }\n         },\n         \"parameterOrder\": [\n          \"projectId\",\n          \"zoneId\",\n          \"clusterId\"\n         ],\n         \"response\": {\n          \"$ref\": \"Cluster\"\n         },\n         \"scopes\": [\n          \"https://www.googleapis.com/auth/cloud-platform\"\n         ]\n        },\n        \"list\": {\n         \"id\": \"container.projects.zones.clusters.list\",\n         \"path\": \"{projectId}/zones/{zoneId}/clusters\",\n         \"httpMethod\": \"GET\",\n         \"description\": \"Lists all clusters owned by a project in the specified zone.\",\n         \"parameters\": {\n          \"projectId\": {\n           \"type\": \"string\",\n           \"description\": \"The Google Developers Console project ID or  project number.\",\n           \"required\": true,\n           \"location\": \"path\"\n          },\n          \"zoneId\": {\n           \"type\": \"string\",\n           \"description\": \"The name of the Google Compute Engine zone in which the cluster resides.\",\n           \"required\": true,\n           \"location\": \"path\"\n          }\n         },\n         \"parameterOrder\": [\n          \"projectId\",\n          \"zoneId\"\n         ],\n         \"response\": {\n          \"$ref\": \"ListClustersResponse\"\n         },\n         \"scopes\": [\n          \"https://www.googleapis.com/auth/cloud-platform\"\n         ]\n        }\n       }\n      },\n      \"operations\": {\n       \"methods\": {\n        \"get\": {\n         \"id\": \"container.projects.zones.operations.get\",\n         \"path\": \"{projectId}/zones/{zoneId}/operations/{operationId}\",\n         \"httpMethod\": \"GET\",\n         \"description\": \"Gets the specified operation.\",\n         \"parameters\": {\n          \"operationId\": {\n           \"type\": \"string\",\n           \"description\": \"The server-assigned name of the operation.\",\n           \"required\": true,\n           \"location\": \"path\"\n    
      },\n          \"projectId\": {\n           \"type\": \"string\",\n           \"description\": \"The Google Developers Console project ID or  project number.\",\n           \"required\": true,\n           \"location\": \"path\"\n          },\n          \"zoneId\": {\n           \"type\": \"string\",\n           \"description\": \"The name of the Google Compute Engine zone in which the operation resides. This is always the same zone as the cluster with which the operation is associated.\",\n           \"required\": true,\n           \"location\": \"path\"\n          }\n         },\n         \"parameterOrder\": [\n          \"projectId\",\n          \"zoneId\",\n          \"operationId\"\n         ],\n         \"response\": {\n          \"$ref\": \"Operation\"\n         },\n         \"scopes\": [\n          \"https://www.googleapis.com/auth/cloud-platform\"\n         ]\n        },\n        \"list\": {\n         \"id\": \"container.projects.zones.operations.list\",\n         \"path\": \"{projectId}/zones/{zoneId}/operations\",\n         \"httpMethod\": \"GET\",\n         \"description\": \"Lists all operations in a project in a specific zone.\",\n         \"parameters\": {\n          \"projectId\": {\n           \"type\": \"string\",\n           \"description\": \"The Google Developers Console project ID or  project number.\",\n           \"required\": true,\n           \"location\": \"path\"\n          },\n          \"zoneId\": {\n           \"type\": \"string\",\n           \"description\": \"The name of the Google Compute Engine zone to return operations for.\",\n           \"required\": true,\n           \"location\": \"path\"\n          }\n         },\n         \"parameterOrder\": [\n          \"projectId\",\n          \"zoneId\"\n         ],\n         \"response\": {\n          \"$ref\": \"ListOperationsResponse\"\n         },\n         \"scopes\": [\n          \"https://www.googleapis.com/auth/cloud-platform\"\n         ]\n        }\n       }\n      }\n    
 }\n    }\n   }\n  }\n }\n}\n"
  },
  {
    "path": "vendor/google.golang.org/api/container/v1beta1/container-gen.go",
    "content": "// Package container provides access to the Google Container Engine API.\n//\n// See https://cloud.google.com/container-engine/docs/v1beta1/\n//\n// Usage example:\n//\n//   import \"google.golang.org/api/container/v1beta1\"\n//   ...\n//   containerService, err := container.New(oauthHttpClient)\npackage container\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/api/googleapi\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// Always reference these packages, just in case the auto-generated code\n// below doesn't.\nvar _ = bytes.NewBuffer\nvar _ = strconv.Itoa\nvar _ = fmt.Sprintf\nvar _ = json.NewDecoder\nvar _ = io.Copy\nvar _ = url.Parse\nvar _ = googleapi.Version\nvar _ = errors.New\nvar _ = strings.Replace\nvar _ = context.Background\n\nconst apiId = \"container:v1beta1\"\nconst apiName = \"container\"\nconst apiVersion = \"v1beta1\"\nconst basePath = \"https://www.googleapis.com/container/v1beta1/projects/\"\n\n// OAuth2 scopes used by this API.\nconst (\n\t// View and manage your data across Google Cloud Platform services\n\tCloudPlatformScope = \"https://www.googleapis.com/auth/cloud-platform\"\n)\n\nfunc New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}\n\ntype Service struct {\n\tclient    *http.Client\n\tBasePath  string // API endpoint base URL\n\tUserAgent string // optional additional User-Agent fragment\n\n\tProjects *ProjectsService\n}\n\nfunc (s *Service) userAgent() string {\n\tif s.UserAgent == \"\" {\n\t\treturn googleapi.UserAgent\n\t}\n\treturn googleapi.UserAgent + \" \" + s.UserAgent\n}\n\nfunc NewProjectsService(s *Service) *ProjectsService {\n\trs := &ProjectsService{s: s}\n\trs.Clusters = NewProjectsClustersService(s)\n\trs.Operations = 
NewProjectsOperationsService(s)\n\trs.Zones = NewProjectsZonesService(s)\n\treturn rs\n}\n\ntype ProjectsService struct {\n\ts *Service\n\n\tClusters *ProjectsClustersService\n\n\tOperations *ProjectsOperationsService\n\n\tZones *ProjectsZonesService\n}\n\nfunc NewProjectsClustersService(s *Service) *ProjectsClustersService {\n\trs := &ProjectsClustersService{s: s}\n\treturn rs\n}\n\ntype ProjectsClustersService struct {\n\ts *Service\n}\n\nfunc NewProjectsOperationsService(s *Service) *ProjectsOperationsService {\n\trs := &ProjectsOperationsService{s: s}\n\treturn rs\n}\n\ntype ProjectsOperationsService struct {\n\ts *Service\n}\n\nfunc NewProjectsZonesService(s *Service) *ProjectsZonesService {\n\trs := &ProjectsZonesService{s: s}\n\trs.Clusters = NewProjectsZonesClustersService(s)\n\trs.Operations = NewProjectsZonesOperationsService(s)\n\treturn rs\n}\n\ntype ProjectsZonesService struct {\n\ts *Service\n\n\tClusters *ProjectsZonesClustersService\n\n\tOperations *ProjectsZonesOperationsService\n}\n\nfunc NewProjectsZonesClustersService(s *Service) *ProjectsZonesClustersService {\n\trs := &ProjectsZonesClustersService{s: s}\n\treturn rs\n}\n\ntype ProjectsZonesClustersService struct {\n\ts *Service\n}\n\nfunc NewProjectsZonesOperationsService(s *Service) *ProjectsZonesOperationsService {\n\trs := &ProjectsZonesOperationsService{s: s}\n\treturn rs\n}\n\ntype ProjectsZonesOperationsService struct {\n\ts *Service\n}\n\ntype Cluster struct {\n\t// ClusterApiVersion: The API version of the Kubernetes master and\n\t// kubelets running in this cluster. Leave blank to pick up the latest\n\t// stable release, or specify a version of the form \"x.y.z\". The Google\n\t// Container Engine release notes lists the currently supported\n\t// versions. 
If an incorrect version is specified, the server returns an\n\t// error listing the currently supported versions.\n\tClusterApiVersion string `json:\"clusterApiVersion,omitempty\"`\n\n\t// ContainerIpv4Cidr: The IP address range of the container pods in this\n\t// cluster, in  CIDR notation (e.g. 10.96.0.0/14). Leave blank to have\n\t// one automatically chosen or specify a /14 block in 10.0.0.0/8 or\n\t// 172.16.0.0/12.\n\tContainerIpv4Cidr string `json:\"containerIpv4Cidr,omitempty\"`\n\n\t// CreationTimestamp: [Output only] The time the cluster was created, in\n\t// RFC3339 text format.\n\tCreationTimestamp string `json:\"creationTimestamp,omitempty\"`\n\n\t// Description: An optional description of this cluster.\n\tDescription string `json:\"description,omitempty\"`\n\n\t// EnableCloudLogging: Whether logs from the cluster should be made\n\t// available via the Google Cloud Logging service. This includes both\n\t// logs from your applications running in the cluster as well as logs\n\t// from the Kubernetes components themselves.\n\tEnableCloudLogging bool `json:\"enableCloudLogging,omitempty\"`\n\n\t// Endpoint: [Output only] The IP address of this cluster's Kubernetes\n\t// master. The endpoint can be accessed from the internet at\n\t// https://username:password@endpoint/.\n\t//\n\t// See the masterAuth property of this resource for username and\n\t// password information.\n\tEndpoint string `json:\"endpoint,omitempty\"`\n\n\t// MasterAuth: The authentication information for accessing the master.\n\tMasterAuth *MasterAuth `json:\"masterAuth,omitempty\"`\n\n\t// Name: The name of this cluster. 
The name must be unique within this\n\t// project and zone, and can be up to 40 characters with the following\n\t// restrictions:\n\t// - Lowercase letters, numbers, and hyphens only.\n\t// - Must start with a letter.\n\t// - Must end with a number or a letter.\n\tName string `json:\"name,omitempty\"`\n\n\t// Network: The name of the Google Compute Engine network to which the\n\t// cluster is connected.\n\tNetwork string `json:\"network,omitempty\"`\n\n\t// NodeConfig: The machine type and image to use for all nodes in this\n\t// cluster. See the descriptions of the child properties of nodeConfig.\n\tNodeConfig *NodeConfig `json:\"nodeConfig,omitempty\"`\n\n\t// NodeRoutingPrefixSize: [Output only] The size of the address space on\n\t// each node for hosting containers.\n\tNodeRoutingPrefixSize int64 `json:\"nodeRoutingPrefixSize,omitempty\"`\n\n\t// NumNodes: The number of nodes to create in this cluster. You must\n\t// ensure that your Compute Engine resource quota is sufficient for this\n\t// number of instances plus one (to include the master). You must also\n\t// have available firewall and routes quota.\n\tNumNodes int64 `json:\"numNodes,omitempty\"`\n\n\t// SelfLink: [Output only] Server-defined URL for the resource.\n\tSelfLink string `json:\"selfLink,omitempty\"`\n\n\t// ServicesIpv4Cidr: [Output only] The IP address range of the\n\t// Kubernetes services in this cluster, in  CIDR notation (e.g.\n\t// 1.2.3.4/29). 
Service addresses are typically put in the last /16 from\n\t// the container CIDR.\n\tServicesIpv4Cidr string `json:\"servicesIpv4Cidr,omitempty\"`\n\n\t// Status: [Output only] The current status of this cluster.\n\t//\n\t// Possible values:\n\t//   \"error\"\n\t//   \"provisioning\"\n\t//   \"running\"\n\t//   \"stopping\"\n\tStatus string `json:\"status,omitempty\"`\n\n\t// StatusMessage: [Output only] Additional information about the current\n\t// status of this cluster, if available.\n\tStatusMessage string `json:\"statusMessage,omitempty\"`\n\n\t// Zone: [Output only] The name of the Google Compute Engine zone in\n\t// which the cluster resides.\n\tZone string `json:\"zone,omitempty\"`\n}\n\ntype CreateClusterRequest struct {\n\t// Cluster: A cluster resource.\n\tCluster *Cluster `json:\"cluster,omitempty\"`\n}\n\ntype ListAggregatedClustersResponse struct {\n\t// Clusters: A list of clusters in the project, across all zones.\n\tClusters []*Cluster `json:\"clusters,omitempty\"`\n}\n\ntype ListAggregatedOperationsResponse struct {\n\t// Operations: A list of operations in the project, across all zones.\n\tOperations []*Operation `json:\"operations,omitempty\"`\n}\n\ntype ListClustersResponse struct {\n\t// Clusters: A list of clusters in the project in the specified zone.\n\tClusters []*Cluster `json:\"clusters,omitempty\"`\n}\n\ntype ListOperationsResponse struct {\n\t// Operations: A list of operations in the project in the specified\n\t// zone.\n\tOperations []*Operation `json:\"operations,omitempty\"`\n}\n\ntype MasterAuth struct {\n\t// BearerToken: The token used to authenticate API requests to the\n\t// master. The token is to be included in an HTTP Authorization Header\n\t// in all requests to the master endpoint. The format of the header is:\n\t// \"Authorization: Bearer \".\n\tBearerToken string `json:\"bearerToken,omitempty\"`\n\n\t// Password: The password to use for HTTP basic authentication when\n\t// accessing the Kubernetes master endpoint. 
Because the master endpoint\n\t// is open to the internet, you should create a strong password.\n\tPassword string `json:\"password,omitempty\"`\n\n\t// User: The username to use for HTTP basic authentication when\n\t// accessing the Kubernetes master endpoint.\n\tUser string `json:\"user,omitempty\"`\n}\n\ntype NodeConfig struct {\n\t// MachineType: The name of a Google Compute Engine machine type (e.g.\n\t// n1-standard-1).\n\t//\n\t// If unspecified, the default machine type is n1-standard-1.\n\tMachineType string `json:\"machineType,omitempty\"`\n\n\t// ServiceAccounts: The optional list of ServiceAccounts, each with\n\t// their specified scopes, to be made available on all of the node VMs.\n\t// In addition to the service accounts and scopes specified, the\n\t// \"default\" account will always be created with the following scopes to\n\t// ensure the correct functioning of the cluster:\n\t// - https://www.googleapis.com/auth/compute,\n\t// - https://www.googleapis.com/auth/devstorage.read_only\n\tServiceAccounts []*ServiceAccount `json:\"serviceAccounts,omitempty\"`\n\n\t// SourceImage: The fully-specified name of a Google Compute Engine\n\t// image. For example:\n\t// https://www.googleapis.com/compute/v1/projects/debian-cloud/global/ima\n\t// ges/backports-debian-7-wheezy-vYYYYMMDD (where YYYMMDD is the version\n\t// date).\n\t//\n\t// If specifying an image, you are responsible for ensuring its\n\t// compatibility with the Debian 7 backports image. 
We recommend leaving\n\t// this field blank to accept the default backports-debian-7-wheezy\n\t// value.\n\tSourceImage string `json:\"sourceImage,omitempty\"`\n}\n\ntype Operation struct {\n\t// ErrorMessage: If an error has occurred, a textual description of the\n\t// error.\n\tErrorMessage string `json:\"errorMessage,omitempty\"`\n\n\t// Name: The server-assigned ID for the operation.\n\tName string `json:\"name,omitempty\"`\n\n\t// OperationType: The operation type.\n\t//\n\t// Possible values:\n\t//   \"createCluster\"\n\t//   \"deleteCluster\"\n\tOperationType string `json:\"operationType,omitempty\"`\n\n\t// SelfLink: Server-defined URL for the resource.\n\tSelfLink string `json:\"selfLink,omitempty\"`\n\n\t// Status: The current status of the operation.\n\t//\n\t// Possible values:\n\t//   \"done\"\n\t//   \"pending\"\n\t//   \"running\"\n\tStatus string `json:\"status,omitempty\"`\n\n\t// Target: [Optional] The URL of the cluster resource that this\n\t// operation is associated with.\n\tTarget string `json:\"target,omitempty\"`\n\n\t// TargetLink: Server-defined URL for the target of the operation.\n\tTargetLink string `json:\"targetLink,omitempty\"`\n\n\t// Zone: The name of the Google Compute Engine zone in which the\n\t// operation is taking place.\n\tZone string `json:\"zone,omitempty\"`\n}\n\ntype ServiceAccount struct {\n\t// Email: Email address of the service account.\n\tEmail string `json:\"email,omitempty\"`\n\n\t// Scopes: The list of scopes to be made available for this service\n\t// account.\n\tScopes []string `json:\"scopes,omitempty\"`\n}\n\n// method id \"container.projects.clusters.list\":\n\ntype ProjectsClustersListCall struct {\n\ts         *Service\n\tprojectId string\n\topt_      map[string]interface{}\n}\n\n// List: Lists all clusters owned by a project across all zones.\nfunc (r *ProjectsClustersService) List(projectId string) *ProjectsClustersListCall {\n\tc := &ProjectsClustersListCall{s: r.s, opt_: 
make(map[string]interface{})}\n\tc.projectId = projectId\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsClustersListCall) Fields(s ...googleapi.Field) *ProjectsClustersListCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsClustersListCall) Do() (*ListAggregatedClustersResponse, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{projectId}/clusters\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ListAggregatedClustersResponse\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Lists all clusters owned by a project across all zones.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"container.projects.clusters.list\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"projectId\": {\n\t//       \"description\": \"The Google Developers Console project ID or  project number.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{projectId}/clusters\",\n\t//   \"response\": {\n\t//     \"$ref\": \"ListAggregatedClustersResponse\"\n\t//   },\n\t//   \"scopes\": [\n\t//     
\"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"container.projects.operations.list\":\n\ntype ProjectsOperationsListCall struct {\n\ts         *Service\n\tprojectId string\n\topt_      map[string]interface{}\n}\n\n// List: Lists all operations in a project, across all zones.\nfunc (r *ProjectsOperationsService) List(projectId string) *ProjectsOperationsListCall {\n\tc := &ProjectsOperationsListCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsOperationsListCall) Fields(s ...googleapi.Field) *ProjectsOperationsListCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsOperationsListCall) Do() (*ListAggregatedOperationsResponse, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{projectId}/operations\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ListAggregatedOperationsResponse\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Lists all operations in a project, across all zones.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"container.projects.operations.list\",\n\t//   \"parameterOrder\": [\n\t//  
   \"projectId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"projectId\": {\n\t//       \"description\": \"The Google Developers Console project ID or  project number.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{projectId}/operations\",\n\t//   \"response\": {\n\t//     \"$ref\": \"ListAggregatedOperationsResponse\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"container.projects.zones.clusters.create\":\n\ntype ProjectsZonesClustersCreateCall struct {\n\ts                    *Service\n\tprojectId            string\n\tzoneId               string\n\tcreateclusterrequest *CreateClusterRequest\n\topt_                 map[string]interface{}\n}\n\n// Create: Creates a cluster, consisting of the specified number and\n// type of Google Compute Engine instances, plus a Kubernetes master\n// instance.\n//\n// The cluster is created in the project's default network.\n//\n// A firewall is added that allows traffic into port 443 on the master,\n// which enables HTTPS. 
A firewall and a route is added for each node to\n// allow the containers on that node to communicate with all other\n// instances in the cluster.\n//\n// Finally, an entry is added to the project's global metadata\n// indicating which CIDR range is being used by the cluster.\nfunc (r *ProjectsZonesClustersService) Create(projectId string, zoneId string, createclusterrequest *CreateClusterRequest) *ProjectsZonesClustersCreateCall {\n\tc := &ProjectsZonesClustersCreateCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.zoneId = zoneId\n\tc.createclusterrequest = createclusterrequest\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsZonesClustersCreateCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersCreateCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsZonesClustersCreateCall) Do() (*Operation, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.createclusterrequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{projectId}/zones/{zoneId}/clusters\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"zoneId\":    c.zoneId,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Operation\n\tif err := 
json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Creates a cluster, consisting of the specified number and type of Google Compute Engine instances, plus a Kubernetes master instance.\\n\\nThe cluster is created in the project's default network.\\n\\nA firewall is added that allows traffic into port 443 on the master, which enables HTTPS. A firewall and a route is added for each node to allow the containers on that node to communicate with all other instances in the cluster.\\n\\nFinally, an entry is added to the project's global metadata indicating which CIDR range is being used by the cluster.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"container.projects.zones.clusters.create\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"zoneId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"projectId\": {\n\t//       \"description\": \"The Google Developers Console project ID or  project number.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"zoneId\": {\n\t//       \"description\": \"The name of the Google Compute Engine zone in which the cluster resides.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{projectId}/zones/{zoneId}/clusters\",\n\t//   \"request\": {\n\t//     \"$ref\": \"CreateClusterRequest\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Operation\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"container.projects.zones.clusters.delete\":\n\ntype ProjectsZonesClustersDeleteCall struct {\n\ts         *Service\n\tprojectId string\n\tzoneId    string\n\tclusterId string\n\topt_      map[string]interface{}\n}\n\n// Delete: Deletes the cluster, including the Kubernetes 
master and all\n// worker nodes.\n//\n// Firewalls and routes that were configured at cluster creation are\n// also deleted.\nfunc (r *ProjectsZonesClustersService) Delete(projectId string, zoneId string, clusterId string) *ProjectsZonesClustersDeleteCall {\n\tc := &ProjectsZonesClustersDeleteCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.zoneId = zoneId\n\tc.clusterId = clusterId\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsZonesClustersDeleteCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersDeleteCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsZonesClustersDeleteCall) Do() (*Operation, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{projectId}/zones/{zoneId}/clusters/{clusterId}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"DELETE\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"zoneId\":    c.zoneId,\n\t\t\"clusterId\": c.clusterId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Operation\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Deletes the cluster, including the Kubernetes master and all worker nodes.\\n\\nFirewalls and routes that were configured at cluster creation are also deleted.\",\n\t//   \"httpMethod\": \"DELETE\",\n\t//   \"id\": 
\"container.projects.zones.clusters.delete\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"zoneId\",\n\t//     \"clusterId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"clusterId\": {\n\t//       \"description\": \"The name of the cluster to delete.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"The Google Developers Console project ID or  project number.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"zoneId\": {\n\t//       \"description\": \"The name of the Google Compute Engine zone in which the cluster resides.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{projectId}/zones/{zoneId}/clusters/{clusterId}\",\n\t//   \"response\": {\n\t//     \"$ref\": \"Operation\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"container.projects.zones.clusters.get\":\n\ntype ProjectsZonesClustersGetCall struct {\n\ts         *Service\n\tprojectId string\n\tzoneId    string\n\tclusterId string\n\topt_      map[string]interface{}\n}\n\n// Get: Gets a specific cluster.\nfunc (r *ProjectsZonesClustersService) Get(projectId string, zoneId string, clusterId string) *ProjectsZonesClustersGetCall {\n\tc := &ProjectsZonesClustersGetCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.zoneId = zoneId\n\tc.clusterId = clusterId\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsZonesClustersGetCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersGetCall {\n\tc.opt_[\"fields\"] = 
googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsZonesClustersGetCall) Do() (*Cluster, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{projectId}/zones/{zoneId}/clusters/{clusterId}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"zoneId\":    c.zoneId,\n\t\t\"clusterId\": c.clusterId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Cluster\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Gets a specific cluster.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"container.projects.zones.clusters.get\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"zoneId\",\n\t//     \"clusterId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"clusterId\": {\n\t//       \"description\": \"The name of the cluster to retrieve.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"The Google Developers Console project ID or  project number.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"zoneId\": {\n\t//       \"description\": \"The name of the Google Compute Engine zone in which the cluster resides.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     
}\n\t//   },\n\t//   \"path\": \"{projectId}/zones/{zoneId}/clusters/{clusterId}\",\n\t//   \"response\": {\n\t//     \"$ref\": \"Cluster\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"container.projects.zones.clusters.list\":\n\ntype ProjectsZonesClustersListCall struct {\n\ts         *Service\n\tprojectId string\n\tzoneId    string\n\topt_      map[string]interface{}\n}\n\n// List: Lists all clusters owned by a project in the specified zone.\nfunc (r *ProjectsZonesClustersService) List(projectId string, zoneId string) *ProjectsZonesClustersListCall {\n\tc := &ProjectsZonesClustersListCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.zoneId = zoneId\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsZonesClustersListCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersListCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsZonesClustersListCall) Do() (*ListClustersResponse, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{projectId}/zones/{zoneId}/clusters\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"zoneId\":    c.zoneId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ListClustersResponse\n\tif err := 
json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Lists all clusters owned by a project in the specified zone.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"container.projects.zones.clusters.list\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"zoneId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"projectId\": {\n\t//       \"description\": \"The Google Developers Console project ID or  project number.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"zoneId\": {\n\t//       \"description\": \"The name of the Google Compute Engine zone in which the cluster resides.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{projectId}/zones/{zoneId}/clusters\",\n\t//   \"response\": {\n\t//     \"$ref\": \"ListClustersResponse\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"container.projects.zones.operations.get\":\n\ntype ProjectsZonesOperationsGetCall struct {\n\ts           *Service\n\tprojectId   string\n\tzoneId      string\n\toperationId string\n\topt_        map[string]interface{}\n}\n\n// Get: Gets the specified operation.\nfunc (r *ProjectsZonesOperationsService) Get(projectId string, zoneId string, operationId string) *ProjectsZonesOperationsGetCall {\n\tc := &ProjectsZonesOperationsGetCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.zoneId = zoneId\n\tc.operationId = operationId\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsZonesOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsZonesOperationsGetCall 
{\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsZonesOperationsGetCall) Do() (*Operation, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{projectId}/zones/{zoneId}/operations/{operationId}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\":   c.projectId,\n\t\t\"zoneId\":      c.zoneId,\n\t\t\"operationId\": c.operationId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Operation\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Gets the specified operation.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"container.projects.zones.operations.get\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"zoneId\",\n\t//     \"operationId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"operationId\": {\n\t//       \"description\": \"The server-assigned name of the operation.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projectId\": {\n\t//       \"description\": \"The Google Developers Console project ID or  project number.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"zoneId\": {\n\t//       \"description\": \"The name of the Google Compute Engine zone in which the operation resides. 
This is always the same zone as the cluster with which the operation is associated.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{projectId}/zones/{zoneId}/operations/{operationId}\",\n\t//   \"response\": {\n\t//     \"$ref\": \"Operation\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"container.projects.zones.operations.list\":\n\ntype ProjectsZonesOperationsListCall struct {\n\ts         *Service\n\tprojectId string\n\tzoneId    string\n\topt_      map[string]interface{}\n}\n\n// List: Lists all operations in a project in a specific zone.\nfunc (r *ProjectsZonesOperationsService) List(projectId string, zoneId string) *ProjectsZonesOperationsListCall {\n\tc := &ProjectsZonesOperationsListCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectId = projectId\n\tc.zoneId = zoneId\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsZonesOperationsListCall) Fields(s ...googleapi.Field) *ProjectsZonesOperationsListCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsZonesOperationsListCall) Do() (*ListOperationsResponse, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{projectId}/zones/{zoneId}/operations\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"projectId\": c.projectId,\n\t\t\"zoneId\":    c.zoneId,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != 
nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ListOperationsResponse\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Lists all operations in a project in a specific zone.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"container.projects.zones.operations.list\",\n\t//   \"parameterOrder\": [\n\t//     \"projectId\",\n\t//     \"zoneId\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"projectId\": {\n\t//       \"description\": \"The Google Developers Console project ID or  project number.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"zoneId\": {\n\t//       \"description\": \"The name of the Google Compute Engine zone to return operations for.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{projectId}/zones/{zoneId}/operations\",\n\t//   \"response\": {\n\t//     \"$ref\": \"ListOperationsResponse\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\"\n\t//   ]\n\t// }\n\n}\n"
  },
  {
    "path": "vendor/google.golang.org/api/googleapi/googleapi.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package googleapi contains the common code shared by all Google API\n// libraries.\npackage googleapi\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"mime/multipart\"\n\t\"net/http\"\n\t\"net/textproto\"\n\t\"net/url\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/api/googleapi/internal/uritemplates\"\n)\n\n// ContentTyper is an interface for Readers which know (or would like\n// to override) their Content-Type. If a media body doesn't implement\n// ContentTyper, the type is sniffed from the content using\n// http.DetectContentType.\ntype ContentTyper interface {\n\tContentType() string\n}\n\n// A SizeReaderAt is a ReaderAt with a Size method.\n// An io.SectionReader implements SizeReaderAt.\ntype SizeReaderAt interface {\n\tio.ReaderAt\n\tSize() int64\n}\n\nconst (\n\tVersion = \"0.5\"\n\n\t// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.\n\tstatusResumeIncomplete = 308\n\n\t// UserAgent is the header string used to identify this package.\n\tUserAgent = \"google-api-go-client/\" + Version\n\n\t// uploadPause determines the delay between failed upload attempts\n\tuploadPause = 1 * time.Second\n)\n\n// Error contains an error response from the server.\ntype Error struct {\n\t// Code is the HTTP response status code and will always be populated.\n\tCode int `json:\"code\"`\n\t// Message is the server response message and is only populated when\n\t// explicitly referenced by the JSON server response.\n\tMessage string `json:\"message\"`\n\t// Body is the raw response returned by the server.\n\t// It is often but not always JSON, depending on how the request fails.\n\tBody string\n\n\tErrors 
[]ErrorItem\n}\n\n// ErrorItem is a detailed error code & message from the Google API frontend.\ntype ErrorItem struct {\n\t// Reason is the typed error code. For example: \"some_example\".\n\tReason string `json:\"reason\"`\n\t// Message is the human-readable description of the error.\n\tMessage string `json:\"message\"`\n}\n\nfunc (e *Error) Error() string {\n\tif len(e.Errors) == 0 && e.Message == \"\" {\n\t\treturn fmt.Sprintf(\"googleapi: got HTTP response code %d with body: %v\", e.Code, e.Body)\n\t}\n\tvar buf bytes.Buffer\n\tfmt.Fprintf(&buf, \"googleapi: Error %d: \", e.Code)\n\tif e.Message != \"\" {\n\t\tfmt.Fprintf(&buf, \"%s\", e.Message)\n\t}\n\tif len(e.Errors) == 0 {\n\t\treturn strings.TrimSpace(buf.String())\n\t}\n\tif len(e.Errors) == 1 && e.Errors[0].Message == e.Message {\n\t\tfmt.Fprintf(&buf, \", %s\", e.Errors[0].Reason)\n\t\treturn buf.String()\n\t}\n\tfmt.Fprintln(&buf, \"\\nMore details:\")\n\tfor _, v := range e.Errors {\n\t\tfmt.Fprintf(&buf, \"Reason: %s, Message: %s\\n\", v.Reason, v.Message)\n\t}\n\treturn buf.String()\n}\n\ntype errorReply struct {\n\tError *Error `json:\"error\"`\n}\n\n// CheckResponse returns an error (of type *Error) if the response\n// status code is not 2xx.\nfunc CheckResponse(res *http.Response) error {\n\tif res.StatusCode >= 200 && res.StatusCode <= 299 {\n\t\treturn nil\n\t}\n\tslurp, err := ioutil.ReadAll(res.Body)\n\tif err == nil {\n\t\tjerr := new(errorReply)\n\t\terr = json.Unmarshal(slurp, jerr)\n\t\tif err == nil && jerr.Error != nil {\n\t\t\tif jerr.Error.Code == 0 {\n\t\t\t\tjerr.Error.Code = res.StatusCode\n\t\t\t}\n\t\t\tjerr.Error.Body = string(slurp)\n\t\t\treturn jerr.Error\n\t\t}\n\t}\n\treturn &Error{\n\t\tCode: res.StatusCode,\n\t\tBody: string(slurp),\n\t}\n}\n\ntype MarshalStyle bool\n\nvar WithDataWrapper = MarshalStyle(true)\nvar WithoutDataWrapper = MarshalStyle(false)\n\nfunc (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {\n\tbuf := new(bytes.Buffer)\n\tif wrap 
{\n\t\tbuf.Write([]byte(`{\"data\": `))\n\t}\n\terr := json.NewEncoder(buf).Encode(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif wrap {\n\t\tbuf.Write([]byte(`}`))\n\t}\n\treturn buf, nil\n}\n\nfunc getMediaType(media io.Reader) (io.Reader, string) {\n\tif typer, ok := media.(ContentTyper); ok {\n\t\treturn media, typer.ContentType()\n\t}\n\n\tpr, pw := io.Pipe()\n\ttyp := \"application/octet-stream\"\n\tbuf, err := ioutil.ReadAll(io.LimitReader(media, 512))\n\tif err != nil {\n\t\tpw.CloseWithError(fmt.Errorf(\"error reading media: %v\", err))\n\t\treturn pr, typ\n\t}\n\ttyp = http.DetectContentType(buf)\n\tmr := io.MultiReader(bytes.NewReader(buf), media)\n\tgo func() {\n\t\t_, err = io.Copy(pw, mr)\n\t\tif err != nil {\n\t\t\tpw.CloseWithError(fmt.Errorf(\"error reading media: %v\", err))\n\t\t\treturn\n\t\t}\n\t\tpw.Close()\n\t}()\n\treturn pr, typ\n}\n\n// DetectMediaType detects and returns the content type of the provided media.\n// If the type can not be determined, \"application/octet-stream\" is returned.\nfunc DetectMediaType(media io.ReaderAt) string {\n\tif typer, ok := media.(ContentTyper); ok {\n\t\treturn typer.ContentType()\n\t}\n\n\ttyp := \"application/octet-stream\"\n\tbuf := make([]byte, 1024)\n\tn, err := media.ReadAt(buf, 0)\n\tbuf = buf[:n]\n\tif err == nil || err == io.EOF {\n\t\ttyp = http.DetectContentType(buf)\n\t}\n\treturn typ\n}\n\ntype Lengther interface {\n\tLen() int\n}\n\n// endingWithErrorReader from r until it returns an error.  
If the\n// final error from r is io.EOF and e is non-nil, e is used instead.\ntype endingWithErrorReader struct {\n\tr io.Reader\n\te error\n}\n\nfunc (er endingWithErrorReader) Read(p []byte) (n int, err error) {\n\tn, err = er.r.Read(p)\n\tif err == io.EOF && er.e != nil {\n\t\terr = er.e\n\t}\n\treturn\n}\n\nfunc typeHeader(contentType string) textproto.MIMEHeader {\n\th := make(textproto.MIMEHeader)\n\th.Set(\"Content-Type\", contentType)\n\treturn h\n}\n\n// countingWriter counts the number of bytes it receives to write, but\n// discards them.\ntype countingWriter struct {\n\tn *int64\n}\n\nfunc (w countingWriter) Write(p []byte) (int, error) {\n\t*w.n += int64(len(p))\n\treturn len(p), nil\n}\n\n// ConditionallyIncludeMedia does nothing if media is nil.\n//\n// bodyp is an in/out parameter.  It should initially point to the\n// reader of the application/json (or whatever) payload to send in the\n// API request.  It's updated to point to the multipart body reader.\n//\n// ctypep is an in/out parameter.  It should initially point to the\n// content type of the bodyp, usually \"application/json\".  
It's updated\n// to the \"multipart/related\" content type, with random boundary.\n//\n// The return value is the content-length of the entire multpart body.\nfunc ConditionallyIncludeMedia(media io.Reader, bodyp *io.Reader, ctypep *string) (cancel func(), ok bool) {\n\tif media == nil {\n\t\treturn\n\t}\n\t// Get the media type, which might return a different reader instance.\n\tvar mediaType string\n\tmedia, mediaType = getMediaType(media)\n\n\tbody, bodyType := *bodyp, *ctypep\n\n\tpr, pw := io.Pipe()\n\tmpw := multipart.NewWriter(pw)\n\t*bodyp = pr\n\t*ctypep = \"multipart/related; boundary=\" + mpw.Boundary()\n\tgo func() {\n\t\tw, err := mpw.CreatePart(typeHeader(bodyType))\n\t\tif err != nil {\n\t\t\tmpw.Close()\n\t\t\tpw.CloseWithError(fmt.Errorf(\"googleapi: body CreatePart failed: %v\", err))\n\t\t\treturn\n\t\t}\n\t\t_, err = io.Copy(w, body)\n\t\tif err != nil {\n\t\t\tmpw.Close()\n\t\t\tpw.CloseWithError(fmt.Errorf(\"googleapi: body Copy failed: %v\", err))\n\t\t\treturn\n\t\t}\n\n\t\tw, err = mpw.CreatePart(typeHeader(mediaType))\n\t\tif err != nil {\n\t\t\tmpw.Close()\n\t\t\tpw.CloseWithError(fmt.Errorf(\"googleapi: media CreatePart failed: %v\", err))\n\t\t\treturn\n\t\t}\n\t\t_, err = io.Copy(w, media)\n\t\tif err != nil {\n\t\t\tmpw.Close()\n\t\t\tpw.CloseWithError(fmt.Errorf(\"googleapi: media Copy failed: %v\", err))\n\t\t\treturn\n\t\t}\n\t\tmpw.Close()\n\t\tpw.Close()\n\t}()\n\tcancel = func() { pw.CloseWithError(errAborted) }\n\treturn cancel, true\n}\n\nvar errAborted = errors.New(\"googleapi: upload aborted\")\n\n// ProgressUpdater is a function that is called upon every progress update of a resumable upload.\n// This is the only part of a resumable upload (from googleapi) that is usable by the developer.\n// The remaining usable pieces of resumable uploads is exposed in each auto-generated API.\ntype ProgressUpdater func(current, total int64)\n\n// ResumableUpload is used by the generated APIs to provide resumable uploads.\n// It is not 
used by developers directly.\ntype ResumableUpload struct {\n\tClient *http.Client\n\t// URI is the resumable resource destination provided by the server after specifying \"&uploadType=resumable\".\n\tURI       string\n\tUserAgent string // User-Agent for header of the request\n\t// Media is the object being uploaded.\n\tMedia io.ReaderAt\n\t// MediaType defines the media type, e.g. \"image/jpeg\".\n\tMediaType string\n\t// ContentLength is the full size of the object being uploaded.\n\tContentLength int64\n\n\tmu       sync.Mutex // guards progress\n\tprogress int64      // number of bytes uploaded so far\n\n\t// Callback is an optional function that will be called upon every progress update.\n\tCallback ProgressUpdater\n}\n\nvar (\n\t// rangeRE matches the transfer status response from the server. $1 is the last byte index uploaded.\n\trangeRE = regexp.MustCompile(`^bytes=0\\-(\\d+)$`)\n\t// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.\n\t// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.\n\tchunkSize int64 = 1 << 18\n)\n\n// Progress returns the number of bytes uploaded at this point.\nfunc (rx *ResumableUpload) Progress() int64 {\n\trx.mu.Lock()\n\tdefer rx.mu.Unlock()\n\treturn rx.progress\n}\n\nfunc (rx *ResumableUpload) transferStatus() (int64, *http.Response, error) {\n\treq, _ := http.NewRequest(\"POST\", rx.URI, nil)\n\treq.ContentLength = 0\n\treq.Header.Set(\"User-Agent\", rx.UserAgent)\n\treq.Header.Set(\"Content-Range\", fmt.Sprintf(\"bytes */%v\", rx.ContentLength))\n\tres, err := rx.Client.Do(req)\n\tif err != nil || res.StatusCode != statusResumeIncomplete {\n\t\treturn 0, res, err\n\t}\n\tvar start int64\n\tif m := rangeRE.FindStringSubmatch(res.Header.Get(\"Range\")); len(m) == 2 {\n\t\tstart, err = strconv.ParseInt(m[1], 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, nil, fmt.Errorf(\"unable to parse range size %v\", m[1])\n\t\t}\n\t\tstart += 1 // Start 
at the next byte\n\t}\n\treturn start, res, nil\n}\n\ntype chunk struct {\n\tbody io.Reader\n\tsize int64\n\terr  error\n}\n\nfunc (rx *ResumableUpload) transferChunks(ctx context.Context) (*http.Response, error) {\n\tstart, res, err := rx.transferStatus()\n\tif err != nil || res.StatusCode != statusResumeIncomplete {\n\t\treturn res, err\n\t}\n\n\tfor {\n\t\tselect { // Check for cancellation\n\t\tcase <-ctx.Done():\n\t\t\tres.StatusCode = http.StatusRequestTimeout\n\t\t\treturn res, ctx.Err()\n\t\tdefault:\n\t\t}\n\t\treqSize := rx.ContentLength - start\n\t\tif reqSize > chunkSize {\n\t\t\treqSize = chunkSize\n\t\t}\n\t\tr := io.NewSectionReader(rx.Media, start, reqSize)\n\t\treq, _ := http.NewRequest(\"POST\", rx.URI, r)\n\t\treq.ContentLength = reqSize\n\t\treq.Header.Set(\"Content-Range\", fmt.Sprintf(\"bytes %v-%v/%v\", start, start+reqSize-1, rx.ContentLength))\n\t\treq.Header.Set(\"Content-Type\", rx.MediaType)\n\t\treq.Header.Set(\"User-Agent\", rx.UserAgent)\n\t\tres, err = rx.Client.Do(req)\n\t\tstart += reqSize\n\t\tif err == nil && (res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK) {\n\t\t\trx.mu.Lock()\n\t\t\trx.progress = start // keep track of number of bytes sent so far\n\t\t\trx.mu.Unlock()\n\t\t\tif rx.Callback != nil {\n\t\t\t\trx.Callback(start, rx.ContentLength)\n\t\t\t}\n\t\t}\n\t\tif err != nil || res.StatusCode != statusResumeIncomplete {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn res, err\n}\n\nvar sleep = time.Sleep // override in unit tests\n\n// Upload starts the process of a resumable upload with a cancellable context.\n// It retries indefinitely (with a pause of uploadPause between attempts) until cancelled.\n// It is called from the auto-generated API code and is not visible to the user.\n// rx is private to the auto-generated API code.\nfunc (rx *ResumableUpload) Upload(ctx context.Context) (*http.Response, error) {\n\tvar res *http.Response\n\tvar err error\n\tfor {\n\t\tres, err = rx.transferChunks(ctx)\n\t\tif 
err != nil || res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {\n\t\t\treturn res, err\n\t\t}\n\t\tselect { // Check for cancellation\n\t\tcase <-ctx.Done():\n\t\t\tres.StatusCode = http.StatusRequestTimeout\n\t\t\treturn res, ctx.Err()\n\t\tdefault:\n\t\t}\n\t\tsleep(uploadPause)\n\t}\n\treturn res, err\n}\n\nfunc ResolveRelative(basestr, relstr string) string {\n\tu, _ := url.Parse(basestr)\n\trel, _ := url.Parse(relstr)\n\tu = u.ResolveReference(rel)\n\tus := u.String()\n\tus = strings.Replace(us, \"%7B\", \"{\", -1)\n\tus = strings.Replace(us, \"%7D\", \"}\", -1)\n\treturn us\n}\n\n// has4860Fix is whether this Go environment contains the fix for\n// http://golang.org/issue/4860\nvar has4860Fix bool\n\n// init initializes has4860Fix by checking the behavior of the net/http package.\nfunc init() {\n\tr := http.Request{\n\t\tURL: &url.URL{\n\t\t\tScheme: \"http\",\n\t\t\tOpaque: \"//opaque\",\n\t\t},\n\t}\n\tb := &bytes.Buffer{}\n\tr.Write(b)\n\thas4860Fix = bytes.HasPrefix(b.Bytes(), []byte(\"GET http\"))\n}\n\n// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it\n// don't alter any hex-escaped characters in u.Path.\nfunc SetOpaque(u *url.URL) {\n\tu.Opaque = \"//\" + u.Host + u.Path\n\tif !has4860Fix {\n\t\tu.Opaque = u.Scheme + \":\" + u.Opaque\n\t}\n}\n\n// Expand subsitutes any {encoded} strings in the URL passed in using\n// the map supplied.\n//\n// This calls SetOpaque to avoid encoding of the parameters in the URL path.\nfunc Expand(u *url.URL, expansions map[string]string) {\n\texpanded, err := uritemplates.Expand(u.Path, expansions)\n\tif err == nil {\n\t\tu.Path = expanded\n\t\tSetOpaque(u)\n\t}\n}\n\n// CloseBody is used to close res.Body.\n// Prior to calling Close, it also tries to Read a small amount to see an EOF.\n// Not seeing an EOF can prevent HTTP Transports from reusing connections.\nfunc CloseBody(res *http.Response) {\n\tif res == nil || res.Body == nil {\n\t\treturn\n\t}\n\t// Justification for 3 
byte reads: two for up to \"\\r\\n\" after\n\t// a JSON/XML document, and then 1 to see EOF if we haven't yet.\n\t// TODO(bradfitz): detect Go 1.3+ and skip these reads.\n\t// See https://codereview.appspot.com/58240043\n\t// and https://codereview.appspot.com/49570044\n\tbuf := make([]byte, 1)\n\tfor i := 0; i < 3; i++ {\n\t\t_, err := res.Body.Read(buf)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tres.Body.Close()\n\n}\n\n// VariantType returns the type name of the given variant.\n// If the map doesn't contain the named key or the value is not a []interface{}, \"\" is returned.\n// This is used to support \"variant\" APIs that can return one of a number of different types.\nfunc VariantType(t map[string]interface{}) string {\n\ts, _ := t[\"type\"].(string)\n\treturn s\n}\n\n// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'.\n// This is used to support \"variant\" APIs that can return one of a number of different types.\n// It reports whether the conversion was successful.\nfunc ConvertVariant(v map[string]interface{}, dst interface{}) bool {\n\tvar buf bytes.Buffer\n\terr := json.NewEncoder(&buf).Encode(v)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn json.Unmarshal(buf.Bytes(), dst) == nil\n}\n\n// A Field names a field to be retrieved with a partial response.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n//\n// Partial responses can dramatically reduce the amount of data that must be sent to your application.\n// In order to request partial responses, you can specify the full list of fields\n// that your application needs by adding the Fields option to your request.\n//\n// Field strings use camelCase with leading lower-case characters to identify fields within the response.\n//\n// For example, if your response has a \"NextPageToken\" and a slice of \"Items\" with \"Id\" fields,\n// you could request just those fields like this:\n//\n//     
svc.Events.List().Fields(\"nextPageToken\", \"items/id\").Do()\n//\n// or if you were also interested in each Item's \"Updated\" field, you can combine them like this:\n//\n//     svc.Events.List().Fields(\"nextPageToken\", \"items(id,updated)\").Do()\n//\n// More information about field formatting can be found here:\n// https://developers.google.com/+/api/#fields-syntax\n//\n// Another way to find field names is through the Google API explorer:\n// https://developers.google.com/apis-explorer/#p/\ntype Field string\n\n// CombineFields combines fields into a single string.\nfunc CombineFields(s []Field) string {\n\tr := make([]string, len(s))\n\tfor i, v := range s {\n\t\tr[i] = string(v)\n\t}\n\treturn strings.Join(r, \",\")\n}\n"
  },
  {
    "path": "vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE",
    "content": "Copyright (c) 2013 Joshua Tacoma\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go",
    "content": "// Copyright 2013 Joshua Tacoma. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package uritemplates is a level 4 implementation of RFC 6570 (URI\n// Template, http://tools.ietf.org/html/rfc6570).\n//\n// To use uritemplates, parse a template string and expand it with a value\n// map:\n//\n//\ttemplate, _ := uritemplates.Parse(\"https://api.github.com/repos{/user,repo}\")\n//\tvalues := make(map[string]interface{})\n//\tvalues[\"user\"] = \"jtacoma\"\n//\tvalues[\"repo\"] = \"uritemplates\"\n//\texpanded, _ := template.ExpandString(values)\n//\tfmt.Printf(expanded)\n//\npackage uritemplates\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\tunreserved = regexp.MustCompile(\"[^A-Za-z0-9\\\\-._~]\")\n\treserved   = regexp.MustCompile(\"[^A-Za-z0-9\\\\-._~:/?#[\\\\]@!$&'()*+,;=]\")\n\tvalidname  = regexp.MustCompile(\"^([A-Za-z0-9_\\\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$\")\n\thex        = []byte(\"0123456789ABCDEF\")\n)\n\nfunc pctEncode(src []byte) []byte {\n\tdst := make([]byte, len(src)*3)\n\tfor i, b := range src {\n\t\tbuf := dst[i*3 : i*3+3]\n\t\tbuf[0] = 0x25\n\t\tbuf[1] = hex[b/16]\n\t\tbuf[2] = hex[b%16]\n\t}\n\treturn dst\n}\n\nfunc escape(s string, allowReserved bool) (escaped string) {\n\tif allowReserved {\n\t\tescaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode))\n\t} else {\n\t\tescaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))\n\t}\n\treturn escaped\n}\n\n// A UriTemplate is a parsed representation of a URI template.\ntype UriTemplate struct {\n\traw   string\n\tparts []templatePart\n}\n\n// Parse parses a URI template string into a UriTemplate object.\nfunc Parse(rawtemplate string) (template *UriTemplate, err error) {\n\ttemplate = new(UriTemplate)\n\ttemplate.raw = rawtemplate\n\tsplit := strings.Split(rawtemplate, \"{\")\n\ttemplate.parts = 
make([]templatePart, len(split)*2-1)\n\tfor i, s := range split {\n\t\tif i == 0 {\n\t\t\tif strings.Contains(s, \"}\") {\n\t\t\t\terr = errors.New(\"unexpected }\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttemplate.parts[i].raw = s\n\t\t} else {\n\t\t\tsubsplit := strings.Split(s, \"}\")\n\t\t\tif len(subsplit) != 2 {\n\t\t\t\terr = errors.New(\"malformed template\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\texpression := subsplit[0]\n\t\t\ttemplate.parts[i*2-1], err = parseExpression(expression)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttemplate.parts[i*2].raw = subsplit[1]\n\t\t}\n\t}\n\tif err != nil {\n\t\ttemplate = nil\n\t}\n\treturn template, err\n}\n\ntype templatePart struct {\n\traw           string\n\tterms         []templateTerm\n\tfirst         string\n\tsep           string\n\tnamed         bool\n\tifemp         string\n\tallowReserved bool\n}\n\ntype templateTerm struct {\n\tname     string\n\texplode  bool\n\ttruncate int\n}\n\nfunc parseExpression(expression string) (result templatePart, err error) {\n\tswitch expression[0] {\n\tcase '+':\n\t\tresult.sep = \",\"\n\t\tresult.allowReserved = true\n\t\texpression = expression[1:]\n\tcase '.':\n\t\tresult.first = \".\"\n\t\tresult.sep = \".\"\n\t\texpression = expression[1:]\n\tcase '/':\n\t\tresult.first = \"/\"\n\t\tresult.sep = \"/\"\n\t\texpression = expression[1:]\n\tcase ';':\n\t\tresult.first = \";\"\n\t\tresult.sep = \";\"\n\t\tresult.named = true\n\t\texpression = expression[1:]\n\tcase '?':\n\t\tresult.first = \"?\"\n\t\tresult.sep = \"&\"\n\t\tresult.named = true\n\t\tresult.ifemp = \"=\"\n\t\texpression = expression[1:]\n\tcase '&':\n\t\tresult.first = \"&\"\n\t\tresult.sep = \"&\"\n\t\tresult.named = true\n\t\tresult.ifemp = \"=\"\n\t\texpression = expression[1:]\n\tcase '#':\n\t\tresult.first = \"#\"\n\t\tresult.sep = \",\"\n\t\tresult.allowReserved = true\n\t\texpression = expression[1:]\n\tdefault:\n\t\tresult.sep = \",\"\n\t}\n\trawterms := strings.Split(expression, \",\")\n\tresult.terms = 
make([]templateTerm, len(rawterms))\n\tfor i, raw := range rawterms {\n\t\tresult.terms[i], err = parseTerm(raw)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn result, err\n}\n\nfunc parseTerm(term string) (result templateTerm, err error) {\n\tif strings.HasSuffix(term, \"*\") {\n\t\tresult.explode = true\n\t\tterm = term[:len(term)-1]\n\t}\n\tsplit := strings.Split(term, \":\")\n\tif len(split) == 1 {\n\t\tresult.name = term\n\t} else if len(split) == 2 {\n\t\tresult.name = split[0]\n\t\tvar parsed int64\n\t\tparsed, err = strconv.ParseInt(split[1], 10, 0)\n\t\tresult.truncate = int(parsed)\n\t} else {\n\t\terr = errors.New(\"multiple colons in same term\")\n\t}\n\tif !validname.MatchString(result.name) {\n\t\terr = errors.New(\"not a valid name: \" + result.name)\n\t}\n\tif result.explode && result.truncate > 0 {\n\t\terr = errors.New(\"both explode and prefix modifers on same term\")\n\t}\n\treturn result, err\n}\n\n// Expand expands a URI template with a set of values to produce a string.\nfunc (self *UriTemplate) Expand(value interface{}) (string, error) {\n\tvalues, ismap := value.(map[string]interface{})\n\tif !ismap {\n\t\tif m, ismap := struct2map(value); !ismap {\n\t\t\treturn \"\", errors.New(\"expected map[string]interface{}, struct, or pointer to struct.\")\n\t\t} else {\n\t\t\treturn self.Expand(m)\n\t\t}\n\t}\n\tvar buf bytes.Buffer\n\tfor _, p := range self.parts {\n\t\terr := p.expand(&buf, values)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn buf.String(), nil\n}\n\nfunc (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error {\n\tif len(self.raw) > 0 {\n\t\tbuf.WriteString(self.raw)\n\t\treturn nil\n\t}\n\tvar zeroLen = buf.Len()\n\tbuf.WriteString(self.first)\n\tvar firstLen = buf.Len()\n\tfor _, term := range self.terms {\n\t\tvalue, exists := values[term.name]\n\t\tif !exists {\n\t\t\tcontinue\n\t\t}\n\t\tif buf.Len() != firstLen {\n\t\t\tbuf.WriteString(self.sep)\n\t\t}\n\t\tswitch v 
:= value.(type) {\n\t\tcase string:\n\t\t\tself.expandString(buf, term, v)\n\t\tcase []interface{}:\n\t\t\tself.expandArray(buf, term, v)\n\t\tcase map[string]interface{}:\n\t\t\tif term.truncate > 0 {\n\t\t\t\treturn errors.New(\"cannot truncate a map expansion\")\n\t\t\t}\n\t\t\tself.expandMap(buf, term, v)\n\t\tdefault:\n\t\t\tif m, ismap := struct2map(value); ismap {\n\t\t\t\tif term.truncate > 0 {\n\t\t\t\t\treturn errors.New(\"cannot truncate a map expansion\")\n\t\t\t\t}\n\t\t\t\tself.expandMap(buf, term, m)\n\t\t\t} else {\n\t\t\t\tstr := fmt.Sprintf(\"%v\", value)\n\t\t\t\tself.expandString(buf, term, str)\n\t\t\t}\n\t\t}\n\t}\n\tif buf.Len() == firstLen {\n\t\toriginal := buf.Bytes()[:zeroLen]\n\t\tbuf.Reset()\n\t\tbuf.Write(original)\n\t}\n\treturn nil\n}\n\nfunc (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {\n\tif self.named {\n\t\tbuf.WriteString(name)\n\t\tif empty {\n\t\t\tbuf.WriteString(self.ifemp)\n\t\t} else {\n\t\t\tbuf.WriteString(\"=\")\n\t\t}\n\t}\n}\n\nfunc (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {\n\tif len(s) > t.truncate && t.truncate > 0 {\n\t\ts = s[:t.truncate]\n\t}\n\tself.expandName(buf, t.name, len(s) == 0)\n\tbuf.WriteString(escape(s, self.allowReserved))\n}\n\nfunc (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) {\n\tif len(a) == 0 {\n\t\treturn\n\t} else if !t.explode {\n\t\tself.expandName(buf, t.name, false)\n\t}\n\tfor i, value := range a {\n\t\tif t.explode && i > 0 {\n\t\t\tbuf.WriteString(self.sep)\n\t\t} else if i > 0 {\n\t\t\tbuf.WriteString(\",\")\n\t\t}\n\t\tvar s string\n\t\tswitch v := value.(type) {\n\t\tcase string:\n\t\t\ts = v\n\t\tdefault:\n\t\t\ts = fmt.Sprintf(\"%v\", v)\n\t\t}\n\t\tif len(s) > t.truncate && t.truncate > 0 {\n\t\t\ts = s[:t.truncate]\n\t\t}\n\t\tif self.named && t.explode {\n\t\t\tself.expandName(buf, t.name, len(s) == 0)\n\t\t}\n\t\tbuf.WriteString(escape(s, 
self.allowReserved))\n\t}\n}\n\nfunc (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) {\n\tif len(m) == 0 {\n\t\treturn\n\t}\n\tif !t.explode {\n\t\tself.expandName(buf, t.name, len(m) == 0)\n\t}\n\tvar firstLen = buf.Len()\n\tfor k, value := range m {\n\t\tif firstLen != buf.Len() {\n\t\t\tif t.explode {\n\t\t\t\tbuf.WriteString(self.sep)\n\t\t\t} else {\n\t\t\t\tbuf.WriteString(\",\")\n\t\t\t}\n\t\t}\n\t\tvar s string\n\t\tswitch v := value.(type) {\n\t\tcase string:\n\t\t\ts = v\n\t\tdefault:\n\t\t\ts = fmt.Sprintf(\"%v\", v)\n\t\t}\n\t\tif t.explode {\n\t\t\tbuf.WriteString(escape(k, self.allowReserved))\n\t\t\tbuf.WriteRune('=')\n\t\t\tbuf.WriteString(escape(s, self.allowReserved))\n\t\t} else {\n\t\t\tbuf.WriteString(escape(k, self.allowReserved))\n\t\t\tbuf.WriteRune(',')\n\t\t\tbuf.WriteString(escape(s, self.allowReserved))\n\t\t}\n\t}\n}\n\nfunc struct2map(v interface{}) (map[string]interface{}, bool) {\n\tvalue := reflect.ValueOf(v)\n\tswitch value.Type().Kind() {\n\tcase reflect.Ptr:\n\t\treturn struct2map(value.Elem().Interface())\n\tcase reflect.Struct:\n\t\tm := make(map[string]interface{})\n\t\tfor i := 0; i < value.NumField(); i++ {\n\t\t\ttag := value.Type().Field(i).Tag\n\t\t\tvar name string\n\t\t\tif strings.Contains(string(tag), \":\") {\n\t\t\t\tname = tag.Get(\"uri\")\n\t\t\t} else {\n\t\t\t\tname = strings.TrimSpace(string(tag))\n\t\t\t}\n\t\t\tif len(name) == 0 {\n\t\t\t\tname = value.Type().Field(i).Name\n\t\t\t}\n\t\t\tm[name] = value.Field(i).Interface()\n\t\t}\n\t\treturn m, true\n\t}\n\treturn nil, false\n}\n"
  },
  {
    "path": "vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go",
    "content": "package uritemplates\n\nfunc Expand(path string, expansions map[string]string) (string, error) {\n\ttemplate, err := Parse(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvalues := make(map[string]interface{})\n\tfor k, v := range expansions {\n\t\tvalues[k] = v\n\t}\n\treturn template.Expand(values)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/api/googleapi/transport/apikey.go",
    "content": "// Copyright 2012 Google Inc. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package transport contains HTTP transports used to make\n// authenticated API requests.\npackage transport\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n)\n\n// APIKey is an HTTP Transport which wraps an underlying transport and\n// appends an API Key \"key\" parameter to the URL of outgoing requests.\ntype APIKey struct {\n\t// Key is the API Key to set on requests.\n\tKey string\n\n\t// Transport is the underlying HTTP transport.\n\t// If nil, http.DefaultTransport is used.\n\tTransport http.RoundTripper\n}\n\nfunc (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) {\n\trt := t.Transport\n\tif rt == nil {\n\t\trt = http.DefaultTransport\n\t\tif rt == nil {\n\t\t\treturn nil, errors.New(\"googleapi/transport: no Transport specified or available\")\n\t\t}\n\t}\n\tnewReq := *req\n\targs := newReq.URL.Query()\n\targs.Set(\"key\", t.Key)\n\tnewReq.URL.RawQuery = args.Encode()\n\treturn rt.RoundTrip(&newReq)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/api/googleapi/types.go",
    "content": "// Copyright 2013 Google Inc. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage googleapi\n\nimport (\n\t\"encoding/json\"\n\t\"strconv\"\n)\n\n// Int64s is a slice of int64s that marshal as quoted strings in JSON.\ntype Int64s []int64\n\nfunc (q *Int64s) UnmarshalJSON(raw []byte) error {\n\t*q = (*q)[:0]\n\tvar ss []string\n\tif err := json.Unmarshal(raw, &ss); err != nil {\n\t\treturn err\n\t}\n\tfor _, s := range ss {\n\t\tv, err := strconv.ParseInt(s, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*q = append(*q, int64(v))\n\t}\n\treturn nil\n}\n\n// Int32s is a slice of int32s that marshal as quoted strings in JSON.\ntype Int32s []int32\n\nfunc (q *Int32s) UnmarshalJSON(raw []byte) error {\n\t*q = (*q)[:0]\n\tvar ss []string\n\tif err := json.Unmarshal(raw, &ss); err != nil {\n\t\treturn err\n\t}\n\tfor _, s := range ss {\n\t\tv, err := strconv.ParseInt(s, 10, 32)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*q = append(*q, int32(v))\n\t}\n\treturn nil\n}\n\n// Uint64s is a slice of uint64s that marshal as quoted strings in JSON.\ntype Uint64s []uint64\n\nfunc (q *Uint64s) UnmarshalJSON(raw []byte) error {\n\t*q = (*q)[:0]\n\tvar ss []string\n\tif err := json.Unmarshal(raw, &ss); err != nil {\n\t\treturn err\n\t}\n\tfor _, s := range ss {\n\t\tv, err := strconv.ParseUint(s, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*q = append(*q, uint64(v))\n\t}\n\treturn nil\n}\n\n// Uint32s is a slice of uint32s that marshal as quoted strings in JSON.\ntype Uint32s []uint32\n\nfunc (q *Uint32s) UnmarshalJSON(raw []byte) error {\n\t*q = (*q)[:0]\n\tvar ss []string\n\tif err := json.Unmarshal(raw, &ss); err != nil {\n\t\treturn err\n\t}\n\tfor _, s := range ss {\n\t\tv, err := strconv.ParseUint(s, 10, 32)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*q = append(*q, uint32(v))\n\t}\n\treturn nil\n}\n\n// Float64s is a slice of 
float64s that marshal as quoted strings in JSON.\ntype Float64s []float64\n\nfunc (q *Float64s) UnmarshalJSON(raw []byte) error {\n\t*q = (*q)[:0]\n\tvar ss []string\n\tif err := json.Unmarshal(raw, &ss); err != nil {\n\t\treturn err\n\t}\n\tfor _, s := range ss {\n\t\tv, err := strconv.ParseFloat(s, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*q = append(*q, float64(v))\n\t}\n\treturn nil\n}\n\nfunc quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) {\n\tdst := make([]byte, 0, 2+n*10) // somewhat arbitrary\n\tdst = append(dst, '[')\n\tfor i := 0; i < n; i++ {\n\t\tif i > 0 {\n\t\t\tdst = append(dst, ',')\n\t\t}\n\t\tdst = append(dst, '\"')\n\t\tdst = fn(dst, i)\n\t\tdst = append(dst, '\"')\n\t}\n\tdst = append(dst, ']')\n\treturn dst, nil\n}\n\nfunc (s Int64s) MarshalJSON() ([]byte, error) {\n\treturn quotedList(len(s), func(dst []byte, i int) []byte {\n\t\treturn strconv.AppendInt(dst, s[i], 10)\n\t})\n}\n\nfunc (s Int32s) MarshalJSON() ([]byte, error) {\n\treturn quotedList(len(s), func(dst []byte, i int) []byte {\n\t\treturn strconv.AppendInt(dst, int64(s[i]), 10)\n\t})\n}\n\nfunc (s Uint64s) MarshalJSON() ([]byte, error) {\n\treturn quotedList(len(s), func(dst []byte, i int) []byte {\n\t\treturn strconv.AppendUint(dst, s[i], 10)\n\t})\n}\n\nfunc (s Uint32s) MarshalJSON() ([]byte, error) {\n\treturn quotedList(len(s), func(dst []byte, i int) []byte {\n\t\treturn strconv.AppendUint(dst, uint64(s[i]), 10)\n\t})\n}\n\nfunc (s Float64s) MarshalJSON() ([]byte, error) {\n\treturn quotedList(len(s), func(dst []byte, i int) []byte {\n\t\treturn strconv.AppendFloat(dst, s[i], 'g', -1, 64)\n\t})\n}\n"
  },
  {
    "path": "vendor/google.golang.org/api/pubsub/v1beta2/pubsub-api.json",
    "content": "{\n \"kind\": \"discovery#restDescription\",\n \"etag\": \"\\\"ye6orv2F-1npMW3u9suM3a7C5Bo/k747AQVNKzUoa08QT-Z1GxOMZC0\\\"\",\n \"discoveryVersion\": \"v1\",\n \"id\": \"pubsub:v1beta2\",\n \"name\": \"pubsub\",\n \"version\": \"v1beta2\",\n \"revision\": \"20150326\",\n \"title\": \"Google Cloud Pub/Sub API\",\n \"description\": \"Provides reliable, many-to-many, asynchronous messaging between applications.\",\n \"ownerDomain\": \"google.com\",\n \"ownerName\": \"Google\",\n \"icons\": {\n  \"x16\": \"http://www.google.com/images/icons/product/search-16.gif\",\n  \"x32\": \"http://www.google.com/images/icons/product/search-32.gif\"\n },\n \"documentationLink\": \"\",\n \"protocol\": \"rest\",\n \"baseUrl\": \"https://pubsub.googleapis.com/v1beta2/\",\n \"basePath\": \"/v1beta2/\",\n \"rootUrl\": \"https://pubsub.googleapis.com/\",\n \"servicePath\": \"v1beta2/\",\n \"batchPath\": \"batch\",\n \"parameters\": {\n  \"alt\": {\n   \"type\": \"string\",\n   \"description\": \"Data format for the response.\",\n   \"default\": \"json\",\n   \"enum\": [\n    \"json\"\n   ],\n   \"enumDescriptions\": [\n    \"Responses with Content-Type of application/json\"\n   ],\n   \"location\": \"query\"\n  },\n  \"fields\": {\n   \"type\": \"string\",\n   \"description\": \"Selector specifying which fields to include in a partial response.\",\n   \"location\": \"query\"\n  },\n  \"key\": {\n   \"type\": \"string\",\n   \"description\": \"API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.\",\n   \"location\": \"query\"\n  },\n  \"oauth_token\": {\n   \"type\": \"string\",\n   \"description\": \"OAuth 2.0 token for the current user.\",\n   \"location\": \"query\"\n  },\n  \"prettyPrint\": {\n   \"type\": \"boolean\",\n   \"description\": \"Returns response with indentations and line breaks.\",\n   \"default\": \"true\",\n   \"location\": \"query\"\n  },\n  \"quotaUser\": {\n   \"type\": \"string\",\n   \"description\": \"Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.\",\n   \"location\": \"query\"\n  },\n  \"userIp\": {\n   \"type\": \"string\",\n   \"description\": \"IP address of the site where the request originates. Use this if you want to enforce per-user limits.\",\n   \"location\": \"query\"\n  }\n },\n \"auth\": {\n  \"oauth2\": {\n   \"scopes\": {\n    \"https://www.googleapis.com/auth/cloud-platform\": {\n     \"description\": \"View and manage your data across Google Cloud Platform services\"\n    },\n    \"https://www.googleapis.com/auth/pubsub\": {\n     \"description\": \"View and manage Pub/Sub topics and subscriptions\"\n    }\n   }\n  }\n },\n \"schemas\": {\n  \"AcknowledgeRequest\": {\n   \"id\": \"AcknowledgeRequest\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"ackIds\": {\n     \"type\": \"array\",\n     \"items\": {\n      \"type\": \"string\"\n     }\n    }\n   }\n  },\n  \"Empty\": {\n   \"id\": \"Empty\",\n   \"type\": \"object\"\n  },\n  \"ListSubscriptionsResponse\": {\n   \"id\": \"ListSubscriptionsResponse\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"nextPageToken\": {\n     \"type\": \"string\"\n    },\n    \"subscriptions\": {\n     \"type\": \"array\",\n     \"items\": {\n      \"$ref\": \"Subscription\"\n     }\n    }\n   }\n  },\n  \"ListTopicSubscriptionsResponse\": {\n   \"id\": 
\"ListTopicSubscriptionsResponse\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"nextPageToken\": {\n     \"type\": \"string\"\n    },\n    \"subscriptions\": {\n     \"type\": \"array\",\n     \"items\": {\n      \"type\": \"string\"\n     }\n    }\n   }\n  },\n  \"ListTopicsResponse\": {\n   \"id\": \"ListTopicsResponse\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"nextPageToken\": {\n     \"type\": \"string\"\n    },\n    \"topics\": {\n     \"type\": \"array\",\n     \"items\": {\n      \"$ref\": \"Topic\"\n     }\n    }\n   }\n  },\n  \"ModifyAckDeadlineRequest\": {\n   \"id\": \"ModifyAckDeadlineRequest\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"ackDeadlineSeconds\": {\n     \"type\": \"integer\",\n     \"format\": \"int32\"\n    },\n    \"ackId\": {\n     \"type\": \"string\"\n    }\n   }\n  },\n  \"ModifyPushConfigRequest\": {\n   \"id\": \"ModifyPushConfigRequest\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"pushConfig\": {\n     \"$ref\": \"PushConfig\"\n    }\n   }\n  },\n  \"PublishRequest\": {\n   \"id\": \"PublishRequest\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"messages\": {\n     \"type\": \"array\",\n     \"items\": {\n      \"$ref\": \"PubsubMessage\"\n     }\n    }\n   }\n  },\n  \"PublishResponse\": {\n   \"id\": \"PublishResponse\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"messageIds\": {\n     \"type\": \"array\",\n     \"items\": {\n      \"type\": \"string\"\n     }\n    }\n   }\n  },\n  \"PubsubMessage\": {\n   \"id\": \"PubsubMessage\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"attributes\": {\n     \"type\": \"object\",\n     \"additionalProperties\": {\n      \"type\": \"string\"\n     }\n    },\n    \"data\": {\n     \"type\": \"string\",\n     \"format\": \"byte\"\n    },\n    \"messageId\": {\n     \"type\": \"string\"\n    }\n   }\n  },\n  \"PullRequest\": {\n   \"id\": \"PullRequest\",\n   \"type\": \"object\",\n   \"properties\": {\n    
\"maxMessages\": {\n     \"type\": \"integer\",\n     \"format\": \"int32\"\n    },\n    \"returnImmediately\": {\n     \"type\": \"boolean\"\n    }\n   }\n  },\n  \"PullResponse\": {\n   \"id\": \"PullResponse\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"receivedMessages\": {\n     \"type\": \"array\",\n     \"items\": {\n      \"$ref\": \"ReceivedMessage\"\n     }\n    }\n   }\n  },\n  \"PushConfig\": {\n   \"id\": \"PushConfig\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"attributes\": {\n     \"type\": \"object\",\n     \"additionalProperties\": {\n      \"type\": \"string\"\n     }\n    },\n    \"pushEndpoint\": {\n     \"type\": \"string\"\n    }\n   }\n  },\n  \"ReceivedMessage\": {\n   \"id\": \"ReceivedMessage\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"ackId\": {\n     \"type\": \"string\"\n    },\n    \"message\": {\n     \"$ref\": \"PubsubMessage\"\n    }\n   }\n  },\n  \"Subscription\": {\n   \"id\": \"Subscription\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"ackDeadlineSeconds\": {\n     \"type\": \"integer\",\n     \"format\": \"int32\"\n    },\n    \"name\": {\n     \"type\": \"string\"\n    },\n    \"pushConfig\": {\n     \"$ref\": \"PushConfig\"\n    },\n    \"topic\": {\n     \"type\": \"string\"\n    }\n   }\n  },\n  \"Topic\": {\n   \"id\": \"Topic\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"name\": {\n     \"type\": \"string\"\n    }\n   }\n  }\n },\n \"resources\": {\n  \"projects\": {\n   \"resources\": {\n    \"subscriptions\": {\n     \"methods\": {\n      \"acknowledge\": {\n       \"id\": \"pubsub.projects.subscriptions.acknowledge\",\n       \"path\": \"{+subscription}:acknowledge\",\n       \"httpMethod\": \"POST\",\n       \"description\": \"Acknowledges the messages associated with the ack tokens in the AcknowledgeRequest. The Pub/Sub system can remove the relevant messages from the subscription. 
Acknowledging a message whose ack deadline has expired may succeed, but such a message may be redelivered later. Acknowledging a message more than once will not result in an error.\",\n       \"parameters\": {\n        \"subscription\": {\n         \"type\": \"string\",\n         \"required\": true,\n         \"location\": \"path\"\n        }\n       },\n       \"parameterOrder\": [\n        \"subscription\"\n       ],\n       \"request\": {\n        \"$ref\": \"AcknowledgeRequest\"\n       },\n       \"response\": {\n        \"$ref\": \"Empty\"\n       },\n       \"scopes\": [\n        \"https://www.googleapis.com/auth/cloud-platform\",\n        \"https://www.googleapis.com/auth/pubsub\"\n       ]\n      },\n      \"create\": {\n       \"id\": \"pubsub.projects.subscriptions.create\",\n       \"path\": \"{+name}\",\n       \"httpMethod\": \"PUT\",\n       \"description\": \"Creates a subscription to a given topic for a given subscriber. If the subscription already exists, returns ALREADY_EXISTS. If the corresponding topic doesn't exist, returns NOT_FOUND. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic.\",\n       \"parameters\": {\n        \"name\": {\n         \"type\": \"string\",\n         \"required\": true,\n         \"location\": \"path\"\n        }\n       },\n       \"parameterOrder\": [\n        \"name\"\n       ],\n       \"request\": {\n        \"$ref\": \"Subscription\"\n       },\n       \"response\": {\n        \"$ref\": \"Subscription\"\n       },\n       \"scopes\": [\n        \"https://www.googleapis.com/auth/cloud-platform\",\n        \"https://www.googleapis.com/auth/pubsub\"\n       ]\n      },\n      \"delete\": {\n       \"id\": \"pubsub.projects.subscriptions.delete\",\n       \"path\": \"{+subscription}\",\n       \"httpMethod\": \"DELETE\",\n       \"description\": \"Deletes an existing subscription. 
All pending messages in the subscription are immediately dropped. Calls to Pull after deletion will return NOT_FOUND. After a subscription is deleted, a new one may be created with the same name, but the new one has no association with the old subscription, or its topic unless the same topic is specified.\",\n       \"parameters\": {\n        \"subscription\": {\n         \"type\": \"string\",\n         \"required\": true,\n         \"location\": \"path\"\n        }\n       },\n       \"parameterOrder\": [\n        \"subscription\"\n       ],\n       \"response\": {\n        \"$ref\": \"Empty\"\n       },\n       \"scopes\": [\n        \"https://www.googleapis.com/auth/cloud-platform\",\n        \"https://www.googleapis.com/auth/pubsub\"\n       ]\n      },\n      \"get\": {\n       \"id\": \"pubsub.projects.subscriptions.get\",\n       \"path\": \"{+subscription}\",\n       \"httpMethod\": \"GET\",\n       \"description\": \"Gets the configuration details of a subscription.\",\n       \"parameters\": {\n        \"subscription\": {\n         \"type\": \"string\",\n         \"required\": true,\n         \"location\": \"path\"\n        }\n       },\n       \"parameterOrder\": [\n        \"subscription\"\n       ],\n       \"response\": {\n        \"$ref\": \"Subscription\"\n       },\n       \"scopes\": [\n        \"https://www.googleapis.com/auth/cloud-platform\",\n        \"https://www.googleapis.com/auth/pubsub\"\n       ]\n      },\n      \"list\": {\n       \"id\": \"pubsub.projects.subscriptions.list\",\n       \"path\": \"{+project}/subscriptions\",\n       \"httpMethod\": \"GET\",\n       \"description\": \"Lists matching subscriptions.\",\n       \"parameters\": {\n        \"pageSize\": {\n         \"type\": \"integer\",\n         \"format\": \"int32\",\n         \"location\": \"query\"\n        },\n        \"pageToken\": {\n         \"type\": \"string\",\n         \"location\": \"query\"\n        },\n        \"project\": {\n         \"type\": \"string\",\n  
       \"required\": true,\n         \"location\": \"path\"\n        }\n       },\n       \"parameterOrder\": [\n        \"project\"\n       ],\n       \"response\": {\n        \"$ref\": \"ListSubscriptionsResponse\"\n       },\n       \"scopes\": [\n        \"https://www.googleapis.com/auth/cloud-platform\",\n        \"https://www.googleapis.com/auth/pubsub\"\n       ]\n      },\n      \"modifyAckDeadline\": {\n       \"id\": \"pubsub.projects.subscriptions.modifyAckDeadline\",\n       \"path\": \"{+subscription}:modifyAckDeadline\",\n       \"httpMethod\": \"POST\",\n       \"description\": \"Modifies the ack deadline for a specific message. This method is useful to indicate that more time is needed to process a message by the subscriber, or to make the message available for redelivery if the processing was interrupted.\",\n       \"parameters\": {\n        \"subscription\": {\n         \"type\": \"string\",\n         \"required\": true,\n         \"location\": \"path\"\n        }\n       },\n       \"parameterOrder\": [\n        \"subscription\"\n       ],\n       \"request\": {\n        \"$ref\": \"ModifyAckDeadlineRequest\"\n       },\n       \"response\": {\n        \"$ref\": \"Empty\"\n       },\n       \"scopes\": [\n        \"https://www.googleapis.com/auth/cloud-platform\",\n        \"https://www.googleapis.com/auth/pubsub\"\n       ]\n      },\n      \"modifyPushConfig\": {\n       \"id\": \"pubsub.projects.subscriptions.modifyPushConfig\",\n       \"path\": \"{+subscription}:modifyPushConfig\",\n       \"httpMethod\": \"POST\",\n       \"description\": \"Modifies the PushConfig for a specified subscription. This may be used to change a push subscription to a pull one (signified by an empty PushConfig) or vice versa, or change the endpoint URL and other attributes of a push subscription. 
Messages will accumulate for delivery continuously through the call regardless of changes to the PushConfig.\",\n       \"parameters\": {\n        \"subscription\": {\n         \"type\": \"string\",\n         \"required\": true,\n         \"location\": \"path\"\n        }\n       },\n       \"parameterOrder\": [\n        \"subscription\"\n       ],\n       \"request\": {\n        \"$ref\": \"ModifyPushConfigRequest\"\n       },\n       \"response\": {\n        \"$ref\": \"Empty\"\n       },\n       \"scopes\": [\n        \"https://www.googleapis.com/auth/cloud-platform\",\n        \"https://www.googleapis.com/auth/pubsub\"\n       ]\n      },\n      \"pull\": {\n       \"id\": \"pubsub.projects.subscriptions.pull\",\n       \"path\": \"{+subscription}:pull\",\n       \"httpMethod\": \"POST\",\n       \"description\": \"Pulls messages from the server. Returns an empty list if there are no messages available in the backlog. The server may return UNAVAILABLE if there are too many concurrent pull requests pending for the given subscription.\",\n       \"parameters\": {\n        \"subscription\": {\n         \"type\": \"string\",\n         \"required\": true,\n         \"location\": \"path\"\n        }\n       },\n       \"parameterOrder\": [\n        \"subscription\"\n       ],\n       \"request\": {\n        \"$ref\": \"PullRequest\"\n       },\n       \"response\": {\n        \"$ref\": \"PullResponse\"\n       },\n       \"scopes\": [\n        \"https://www.googleapis.com/auth/cloud-platform\",\n        \"https://www.googleapis.com/auth/pubsub\"\n       ]\n      }\n     }\n    },\n    \"topics\": {\n     \"methods\": {\n      \"create\": {\n       \"id\": \"pubsub.projects.topics.create\",\n       \"path\": \"{+name}\",\n       \"httpMethod\": \"PUT\",\n       \"description\": \"Creates the given topic with the given name.\",\n       \"parameters\": {\n        \"name\": {\n         \"type\": \"string\",\n         \"required\": true,\n         \"location\": \"path\"\n 
       }\n       },\n       \"parameterOrder\": [\n        \"name\"\n       ],\n       \"request\": {\n        \"$ref\": \"Topic\"\n       },\n       \"response\": {\n        \"$ref\": \"Topic\"\n       },\n       \"scopes\": [\n        \"https://www.googleapis.com/auth/cloud-platform\",\n        \"https://www.googleapis.com/auth/pubsub\"\n       ]\n      },\n      \"delete\": {\n       \"id\": \"pubsub.projects.topics.delete\",\n       \"path\": \"{+topic}\",\n       \"httpMethod\": \"DELETE\",\n       \"description\": \"Deletes the topic with the given name. Returns NOT_FOUND if the topic does not exist. After a topic is deleted, a new topic may be created with the same name; this is an entirely new topic with none of the old configuration or subscriptions. Existing subscriptions to this topic are not deleted.\",\n       \"parameters\": {\n        \"topic\": {\n         \"type\": \"string\",\n         \"required\": true,\n         \"location\": \"path\"\n        }\n       },\n       \"parameterOrder\": [\n        \"topic\"\n       ],\n       \"response\": {\n        \"$ref\": \"Empty\"\n       },\n       \"scopes\": [\n        \"https://www.googleapis.com/auth/cloud-platform\",\n        \"https://www.googleapis.com/auth/pubsub\"\n       ]\n      },\n      \"get\": {\n       \"id\": \"pubsub.projects.topics.get\",\n       \"path\": \"{+topic}\",\n       \"httpMethod\": \"GET\",\n       \"description\": \"Gets the configuration of a topic.\",\n       \"parameters\": {\n        \"topic\": {\n         \"type\": \"string\",\n         \"required\": true,\n         \"location\": \"path\"\n        }\n       },\n       \"parameterOrder\": [\n        \"topic\"\n       ],\n       \"response\": {\n        \"$ref\": \"Topic\"\n       },\n       \"scopes\": [\n        \"https://www.googleapis.com/auth/cloud-platform\",\n        \"https://www.googleapis.com/auth/pubsub\"\n       ]\n      },\n      \"list\": {\n       \"id\": \"pubsub.projects.topics.list\",\n       \"path\": 
\"{+project}/topics\",\n       \"httpMethod\": \"GET\",\n       \"description\": \"Lists matching topics.\",\n       \"parameters\": {\n        \"pageSize\": {\n         \"type\": \"integer\",\n         \"format\": \"int32\",\n         \"location\": \"query\"\n        },\n        \"pageToken\": {\n         \"type\": \"string\",\n         \"location\": \"query\"\n        },\n        \"project\": {\n         \"type\": \"string\",\n         \"required\": true,\n         \"location\": \"path\"\n        }\n       },\n       \"parameterOrder\": [\n        \"project\"\n       ],\n       \"response\": {\n        \"$ref\": \"ListTopicsResponse\"\n       },\n       \"scopes\": [\n        \"https://www.googleapis.com/auth/cloud-platform\",\n        \"https://www.googleapis.com/auth/pubsub\"\n       ]\n      },\n      \"publish\": {\n       \"id\": \"pubsub.projects.topics.publish\",\n       \"path\": \"{+topic}:publish\",\n       \"httpMethod\": \"POST\",\n       \"description\": \"Adds one or more messages to the topic. 
Returns NOT_FOUND if the topic does not exist.\",\n       \"parameters\": {\n        \"topic\": {\n         \"type\": \"string\",\n         \"required\": true,\n         \"location\": \"path\"\n        }\n       },\n       \"parameterOrder\": [\n        \"topic\"\n       ],\n       \"request\": {\n        \"$ref\": \"PublishRequest\"\n       },\n       \"response\": {\n        \"$ref\": \"PublishResponse\"\n       },\n       \"scopes\": [\n        \"https://www.googleapis.com/auth/cloud-platform\",\n        \"https://www.googleapis.com/auth/pubsub\"\n       ]\n      }\n     },\n     \"resources\": {\n      \"subscriptions\": {\n       \"methods\": {\n        \"list\": {\n         \"id\": \"pubsub.projects.topics.subscriptions.list\",\n         \"path\": \"{+topic}/subscriptions\",\n         \"httpMethod\": \"GET\",\n         \"description\": \"Lists the name of the subscriptions for this topic.\",\n         \"parameters\": {\n          \"pageSize\": {\n           \"type\": \"integer\",\n           \"format\": \"int32\",\n           \"location\": \"query\"\n          },\n          \"pageToken\": {\n           \"type\": \"string\",\n           \"location\": \"query\"\n          },\n          \"topic\": {\n           \"type\": \"string\",\n           \"required\": true,\n           \"location\": \"path\"\n          }\n         },\n         \"parameterOrder\": [\n          \"topic\"\n         ],\n         \"response\": {\n          \"$ref\": \"ListTopicSubscriptionsResponse\"\n         },\n         \"scopes\": [\n          \"https://www.googleapis.com/auth/cloud-platform\",\n          \"https://www.googleapis.com/auth/pubsub\"\n         ]\n        }\n       }\n      }\n     }\n    }\n   }\n  }\n }\n}\n"
  },
  {
    "path": "vendor/google.golang.org/api/pubsub/v1beta2/pubsub-gen.go",
    "content": "// Package pubsub provides access to the Google Cloud Pub/Sub API.\n//\n// Usage example:\n//\n//   import \"google.golang.org/api/pubsub/v1beta2\"\n//   ...\n//   pubsubService, err := pubsub.New(oauthHttpClient)\npackage pubsub\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/api/googleapi\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// Always reference these packages, just in case the auto-generated code\n// below doesn't.\nvar _ = bytes.NewBuffer\nvar _ = strconv.Itoa\nvar _ = fmt.Sprintf\nvar _ = json.NewDecoder\nvar _ = io.Copy\nvar _ = url.Parse\nvar _ = googleapi.Version\nvar _ = errors.New\nvar _ = strings.Replace\nvar _ = context.Background\n\nconst apiId = \"pubsub:v1beta2\"\nconst apiName = \"pubsub\"\nconst apiVersion = \"v1beta2\"\nconst basePath = \"https://pubsub.googleapis.com/v1beta2/\"\n\n// OAuth2 scopes used by this API.\nconst (\n\t// View and manage your data across Google Cloud Platform services\n\tCloudPlatformScope = \"https://www.googleapis.com/auth/cloud-platform\"\n\n\t// View and manage Pub/Sub topics and subscriptions\n\tPubsubScope = \"https://www.googleapis.com/auth/pubsub\"\n)\n\nfunc New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}\n\ntype Service struct {\n\tclient    *http.Client\n\tBasePath  string // API endpoint base URL\n\tUserAgent string // optional additional User-Agent fragment\n\n\tProjects *ProjectsService\n}\n\nfunc (s *Service) userAgent() string {\n\tif s.UserAgent == \"\" {\n\t\treturn googleapi.UserAgent\n\t}\n\treturn googleapi.UserAgent + \" \" + s.UserAgent\n}\n\nfunc NewProjectsService(s *Service) *ProjectsService {\n\trs := &ProjectsService{s: s}\n\trs.Subscriptions = 
NewProjectsSubscriptionsService(s)\n\trs.Topics = NewProjectsTopicsService(s)\n\treturn rs\n}\n\ntype ProjectsService struct {\n\ts *Service\n\n\tSubscriptions *ProjectsSubscriptionsService\n\n\tTopics *ProjectsTopicsService\n}\n\nfunc NewProjectsSubscriptionsService(s *Service) *ProjectsSubscriptionsService {\n\trs := &ProjectsSubscriptionsService{s: s}\n\treturn rs\n}\n\ntype ProjectsSubscriptionsService struct {\n\ts *Service\n}\n\nfunc NewProjectsTopicsService(s *Service) *ProjectsTopicsService {\n\trs := &ProjectsTopicsService{s: s}\n\trs.Subscriptions = NewProjectsTopicsSubscriptionsService(s)\n\treturn rs\n}\n\ntype ProjectsTopicsService struct {\n\ts *Service\n\n\tSubscriptions *ProjectsTopicsSubscriptionsService\n}\n\nfunc NewProjectsTopicsSubscriptionsService(s *Service) *ProjectsTopicsSubscriptionsService {\n\trs := &ProjectsTopicsSubscriptionsService{s: s}\n\treturn rs\n}\n\ntype ProjectsTopicsSubscriptionsService struct {\n\ts *Service\n}\n\ntype AcknowledgeRequest struct {\n\tAckIds []string `json:\"ackIds,omitempty\"`\n}\n\ntype Empty struct {\n}\n\ntype ListSubscriptionsResponse struct {\n\tNextPageToken string `json:\"nextPageToken,omitempty\"`\n\n\tSubscriptions []*Subscription `json:\"subscriptions,omitempty\"`\n}\n\ntype ListTopicSubscriptionsResponse struct {\n\tNextPageToken string `json:\"nextPageToken,omitempty\"`\n\n\tSubscriptions []string `json:\"subscriptions,omitempty\"`\n}\n\ntype ListTopicsResponse struct {\n\tNextPageToken string `json:\"nextPageToken,omitempty\"`\n\n\tTopics []*Topic `json:\"topics,omitempty\"`\n}\n\ntype ModifyAckDeadlineRequest struct {\n\tAckDeadlineSeconds int64 `json:\"ackDeadlineSeconds,omitempty\"`\n\n\tAckId string `json:\"ackId,omitempty\"`\n}\n\ntype ModifyPushConfigRequest struct {\n\tPushConfig *PushConfig `json:\"pushConfig,omitempty\"`\n}\n\ntype PublishRequest struct {\n\tMessages []*PubsubMessage `json:\"messages,omitempty\"`\n}\n\ntype PublishResponse struct {\n\tMessageIds []string 
`json:\"messageIds,omitempty\"`\n}\n\ntype PubsubMessage struct {\n\tAttributes map[string]string `json:\"attributes,omitempty\"`\n\n\tData string `json:\"data,omitempty\"`\n\n\tMessageId string `json:\"messageId,omitempty\"`\n}\n\ntype PullRequest struct {\n\tMaxMessages int64 `json:\"maxMessages,omitempty\"`\n\n\tReturnImmediately bool `json:\"returnImmediately,omitempty\"`\n}\n\ntype PullResponse struct {\n\tReceivedMessages []*ReceivedMessage `json:\"receivedMessages,omitempty\"`\n}\n\ntype PushConfig struct {\n\tAttributes map[string]string `json:\"attributes,omitempty\"`\n\n\tPushEndpoint string `json:\"pushEndpoint,omitempty\"`\n}\n\ntype ReceivedMessage struct {\n\tAckId string `json:\"ackId,omitempty\"`\n\n\tMessage *PubsubMessage `json:\"message,omitempty\"`\n}\n\ntype Subscription struct {\n\tAckDeadlineSeconds int64 `json:\"ackDeadlineSeconds,omitempty\"`\n\n\tName string `json:\"name,omitempty\"`\n\n\tPushConfig *PushConfig `json:\"pushConfig,omitempty\"`\n\n\tTopic string `json:\"topic,omitempty\"`\n}\n\ntype Topic struct {\n\tName string `json:\"name,omitempty\"`\n}\n\n// method id \"pubsub.projects.subscriptions.acknowledge\":\n\ntype ProjectsSubscriptionsAcknowledgeCall struct {\n\ts                  *Service\n\tsubscription       string\n\tacknowledgerequest *AcknowledgeRequest\n\topt_               map[string]interface{}\n}\n\n// Acknowledge: Acknowledges the messages associated with the ack tokens\n// in the AcknowledgeRequest. The Pub/Sub system can remove the relevant\n// messages from the subscription. Acknowledging a message whose ack\n// deadline has expired may succeed, but such a message may be\n// redelivered later. 
Acknowledging a message more than once will not\n// result in an error.\nfunc (r *ProjectsSubscriptionsService) Acknowledge(subscription string, acknowledgerequest *AcknowledgeRequest) *ProjectsSubscriptionsAcknowledgeCall {\n\tc := &ProjectsSubscriptionsAcknowledgeCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.subscription = subscription\n\tc.acknowledgerequest = acknowledgerequest\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsSubscriptionsAcknowledgeCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsAcknowledgeCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsSubscriptionsAcknowledgeCall) Do() (*Empty, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.acknowledgerequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{+subscription}:acknowledge\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"subscription\": c.subscription,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Empty\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Acknowledges the messages associated with the ack tokens in the AcknowledgeRequest. 
The Pub/Sub system can remove the relevant messages from the subscription. Acknowledging a message whose ack deadline has expired may succeed, but such a message may be redelivered later. Acknowledging a message more than once will not result in an error.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"pubsub.projects.subscriptions.acknowledge\",\n\t//   \"parameterOrder\": [\n\t//     \"subscription\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"subscription\": {\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{+subscription}:acknowledge\",\n\t//   \"request\": {\n\t//     \"$ref\": \"AcknowledgeRequest\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Empty\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/pubsub\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"pubsub.projects.subscriptions.create\":\n\ntype ProjectsSubscriptionsCreateCall struct {\n\ts            *Service\n\tname         string\n\tsubscription *Subscription\n\topt_         map[string]interface{}\n}\n\n// Create: Creates a subscription to a given topic for a given\n// subscriber. If the subscription already exists, returns\n// ALREADY_EXISTS. If the corresponding topic doesn't exist, returns\n// NOT_FOUND. 
If the name is not provided in the request, the server\n// will assign a random name for this subscription on the same project\n// as the topic.\nfunc (r *ProjectsSubscriptionsService) Create(name string, subscription *Subscription) *ProjectsSubscriptionsCreateCall {\n\tc := &ProjectsSubscriptionsCreateCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.name = name\n\tc.subscription = subscription\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsSubscriptionsCreateCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsCreateCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsSubscriptionsCreateCall) Do() (*Subscription, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.subscription)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{+name}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"PUT\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"name\": c.name,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Subscription\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Creates a subscription to a given topic for a given subscriber. If the subscription already exists, returns ALREADY_EXISTS. 
If the corresponding topic doesn't exist, returns NOT_FOUND. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic.\",\n\t//   \"httpMethod\": \"PUT\",\n\t//   \"id\": \"pubsub.projects.subscriptions.create\",\n\t//   \"parameterOrder\": [\n\t//     \"name\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"name\": {\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{+name}\",\n\t//   \"request\": {\n\t//     \"$ref\": \"Subscription\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Subscription\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/pubsub\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"pubsub.projects.subscriptions.delete\":\n\ntype ProjectsSubscriptionsDeleteCall struct {\n\ts            *Service\n\tsubscription string\n\topt_         map[string]interface{}\n}\n\n// Delete: Deletes an existing subscription. All pending messages in the\n// subscription are immediately dropped. Calls to Pull after deletion\n// will return NOT_FOUND. 
After a subscription is deleted, a new one may\n// be created with the same name, but the new one has no association\n// with the old subscription, or its topic unless the same topic is\n// specified.\nfunc (r *ProjectsSubscriptionsService) Delete(subscription string) *ProjectsSubscriptionsDeleteCall {\n\tc := &ProjectsSubscriptionsDeleteCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.subscription = subscription\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsSubscriptionsDeleteCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsDeleteCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsSubscriptionsDeleteCall) Do() (*Empty, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{+subscription}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"DELETE\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"subscription\": c.subscription,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Empty\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Deletes an existing subscription. All pending messages in the subscription are immediately dropped. Calls to Pull after deletion will return NOT_FOUND. 
After a subscription is deleted, a new one may be created with the same name, but the new one has no association with the old subscription, or its topic unless the same topic is specified.\",\n\t//   \"httpMethod\": \"DELETE\",\n\t//   \"id\": \"pubsub.projects.subscriptions.delete\",\n\t//   \"parameterOrder\": [\n\t//     \"subscription\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"subscription\": {\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{+subscription}\",\n\t//   \"response\": {\n\t//     \"$ref\": \"Empty\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/pubsub\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"pubsub.projects.subscriptions.get\":\n\ntype ProjectsSubscriptionsGetCall struct {\n\ts            *Service\n\tsubscription string\n\topt_         map[string]interface{}\n}\n\n// Get: Gets the configuration details of a subscription.\nfunc (r *ProjectsSubscriptionsService) Get(subscription string) *ProjectsSubscriptionsGetCall {\n\tc := &ProjectsSubscriptionsGetCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.subscription = subscription\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsSubscriptionsGetCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsGetCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsSubscriptionsGetCall) Do() (*Subscription, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{+subscription}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := 
http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"subscription\": c.subscription,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Subscription\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Gets the configuration details of a subscription.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"pubsub.projects.subscriptions.get\",\n\t//   \"parameterOrder\": [\n\t//     \"subscription\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"subscription\": {\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{+subscription}\",\n\t//   \"response\": {\n\t//     \"$ref\": \"Subscription\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/pubsub\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"pubsub.projects.subscriptions.list\":\n\ntype ProjectsSubscriptionsListCall struct {\n\ts       *Service\n\tproject string\n\topt_    map[string]interface{}\n}\n\n// List: Lists matching subscriptions.\nfunc (r *ProjectsSubscriptionsService) List(project string) *ProjectsSubscriptionsListCall {\n\tc := &ProjectsSubscriptionsListCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.project = project\n\treturn c\n}\n\n// PageSize sets the optional parameter \"pageSize\":\nfunc (c *ProjectsSubscriptionsListCall) PageSize(pageSize int64) *ProjectsSubscriptionsListCall {\n\tc.opt_[\"pageSize\"] = pageSize\n\treturn c\n}\n\n// PageToken sets the optional parameter \"pageToken\":\nfunc (c *ProjectsSubscriptionsListCall) PageToken(pageToken string) 
*ProjectsSubscriptionsListCall {\n\tc.opt_[\"pageToken\"] = pageToken\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsSubscriptionsListCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsListCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsSubscriptionsListCall) Do() (*ListSubscriptionsResponse, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"pageSize\"]; ok {\n\t\tparams.Set(\"pageSize\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"pageToken\"]; ok {\n\t\tparams.Set(\"pageToken\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{+project}/subscriptions\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"project\": c.project,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ListSubscriptionsResponse\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Lists matching subscriptions.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"pubsub.projects.subscriptions.list\",\n\t//   \"parameterOrder\": [\n\t//     \"project\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"pageSize\": {\n\t//       \"format\": \"int32\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"integer\"\n\t//     },\n\t//     \"pageToken\": {\n\t//       \"location\": \"query\",\n\t//       
\"type\": \"string\"\n\t//     },\n\t//     \"project\": {\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{+project}/subscriptions\",\n\t//   \"response\": {\n\t//     \"$ref\": \"ListSubscriptionsResponse\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/pubsub\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"pubsub.projects.subscriptions.modifyAckDeadline\":\n\ntype ProjectsSubscriptionsModifyAckDeadlineCall struct {\n\ts                        *Service\n\tsubscription             string\n\tmodifyackdeadlinerequest *ModifyAckDeadlineRequest\n\topt_                     map[string]interface{}\n}\n\n// ModifyAckDeadline: Modifies the ack deadline for a specific message.\n// This method is useful to indicate that more time is needed to process\n// a message by the subscriber, or to make the message available for\n// redelivery if the processing was interrupted.\nfunc (r *ProjectsSubscriptionsService) ModifyAckDeadline(subscription string, modifyackdeadlinerequest *ModifyAckDeadlineRequest) *ProjectsSubscriptionsModifyAckDeadlineCall {\n\tc := &ProjectsSubscriptionsModifyAckDeadlineCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.subscription = subscription\n\tc.modifyackdeadlinerequest = modifyackdeadlinerequest\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsSubscriptionsModifyAckDeadlineCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsModifyAckDeadlineCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsSubscriptionsModifyAckDeadlineCall) Do() (*Empty, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.modifyackdeadlinerequest)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{+subscription}:modifyAckDeadline\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"subscription\": c.subscription,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Empty\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Modifies the ack deadline for a specific message. This method is useful to indicate that more time is needed to process a message by the subscriber, or to make the message available for redelivery if the processing was interrupted.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"pubsub.projects.subscriptions.modifyAckDeadline\",\n\t//   \"parameterOrder\": [\n\t//     \"subscription\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"subscription\": {\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{+subscription}:modifyAckDeadline\",\n\t//   \"request\": {\n\t//     \"$ref\": \"ModifyAckDeadlineRequest\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Empty\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/pubsub\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"pubsub.projects.subscriptions.modifyPushConfig\":\n\ntype 
ProjectsSubscriptionsModifyPushConfigCall struct {\n\ts                       *Service\n\tsubscription            string\n\tmodifypushconfigrequest *ModifyPushConfigRequest\n\topt_                    map[string]interface{}\n}\n\n// ModifyPushConfig: Modifies the PushConfig for a specified\n// subscription. This may be used to change a push subscription to a\n// pull one (signified by an empty PushConfig) or vice versa, or change\n// the endpoint URL and other attributes of a push subscription.\n// Messages will accumulate for delivery continuously through the call\n// regardless of changes to the PushConfig.\nfunc (r *ProjectsSubscriptionsService) ModifyPushConfig(subscription string, modifypushconfigrequest *ModifyPushConfigRequest) *ProjectsSubscriptionsModifyPushConfigCall {\n\tc := &ProjectsSubscriptionsModifyPushConfigCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.subscription = subscription\n\tc.modifypushconfigrequest = modifypushconfigrequest\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsSubscriptionsModifyPushConfigCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsModifyPushConfigCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsSubscriptionsModifyPushConfigCall) Do() (*Empty, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.modifypushconfigrequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{+subscription}:modifyPushConfig\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, 
map[string]string{\n\t\t\"subscription\": c.subscription,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Empty\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Modifies the PushConfig for a specified subscription. This may be used to change a push subscription to a pull one (signified by an empty PushConfig) or vice versa, or change the endpoint URL and other attributes of a push subscription. Messages will accumulate for delivery continuously through the call regardless of changes to the PushConfig.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"pubsub.projects.subscriptions.modifyPushConfig\",\n\t//   \"parameterOrder\": [\n\t//     \"subscription\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"subscription\": {\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{+subscription}:modifyPushConfig\",\n\t//   \"request\": {\n\t//     \"$ref\": \"ModifyPushConfigRequest\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Empty\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/pubsub\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"pubsub.projects.subscriptions.pull\":\n\ntype ProjectsSubscriptionsPullCall struct {\n\ts            *Service\n\tsubscription string\n\tpullrequest  *PullRequest\n\topt_         map[string]interface{}\n}\n\n// Pull: Pulls messages from the server. Returns an empty list if there\n// are no messages available in the backlog. 
The server may return\n// UNAVAILABLE if there are too many concurrent pull requests pending\n// for the given subscription.\nfunc (r *ProjectsSubscriptionsService) Pull(subscription string, pullrequest *PullRequest) *ProjectsSubscriptionsPullCall {\n\tc := &ProjectsSubscriptionsPullCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.subscription = subscription\n\tc.pullrequest = pullrequest\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsSubscriptionsPullCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsPullCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsSubscriptionsPullCall) Do() (*PullResponse, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.pullrequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{+subscription}:pull\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"subscription\": c.subscription,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *PullResponse\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Pulls messages from the server. Returns an empty list if there are no messages available in the backlog. 
The server may return UNAVAILABLE if there are too many concurrent pull requests pending for the given subscription.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"pubsub.projects.subscriptions.pull\",\n\t//   \"parameterOrder\": [\n\t//     \"subscription\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"subscription\": {\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{+subscription}:pull\",\n\t//   \"request\": {\n\t//     \"$ref\": \"PullRequest\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"PullResponse\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/pubsub\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"pubsub.projects.topics.create\":\n\ntype ProjectsTopicsCreateCall struct {\n\ts     *Service\n\tname  string\n\ttopic *Topic\n\topt_  map[string]interface{}\n}\n\n// Create: Creates the given topic with the given name.\nfunc (r *ProjectsTopicsService) Create(name string, topic *Topic) *ProjectsTopicsCreateCall {\n\tc := &ProjectsTopicsCreateCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.name = name\n\tc.topic = topic\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsTopicsCreateCall) Fields(s ...googleapi.Field) *ProjectsTopicsCreateCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsTopicsCreateCall) Do() (*Topic, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.topic)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := 
googleapi.ResolveRelative(c.s.BasePath, \"{+name}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"PUT\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"name\": c.name,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Topic\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Creates the given topic with the given name.\",\n\t//   \"httpMethod\": \"PUT\",\n\t//   \"id\": \"pubsub.projects.topics.create\",\n\t//   \"parameterOrder\": [\n\t//     \"name\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"name\": {\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{+name}\",\n\t//   \"request\": {\n\t//     \"$ref\": \"Topic\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Topic\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/pubsub\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"pubsub.projects.topics.delete\":\n\ntype ProjectsTopicsDeleteCall struct {\n\ts     *Service\n\ttopic string\n\topt_  map[string]interface{}\n}\n\n// Delete: Deletes the topic with the given name. Returns NOT_FOUND if\n// the topic does not exist. After a topic is deleted, a new topic may\n// be created with the same name; this is an entirely new topic with\n// none of the old configuration or subscriptions. 
Existing\n// subscriptions to this topic are not deleted.\nfunc (r *ProjectsTopicsService) Delete(topic string) *ProjectsTopicsDeleteCall {\n\tc := &ProjectsTopicsDeleteCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.topic = topic\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsTopicsDeleteCall) Fields(s ...googleapi.Field) *ProjectsTopicsDeleteCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsTopicsDeleteCall) Do() (*Empty, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{+topic}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"DELETE\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"topic\": c.topic,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Empty\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Deletes the topic with the given name. Returns NOT_FOUND if the topic does not exist. After a topic is deleted, a new topic may be created with the same name; this is an entirely new topic with none of the old configuration or subscriptions. 
Existing subscriptions to this topic are not deleted.\",\n\t//   \"httpMethod\": \"DELETE\",\n\t//   \"id\": \"pubsub.projects.topics.delete\",\n\t//   \"parameterOrder\": [\n\t//     \"topic\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"topic\": {\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{+topic}\",\n\t//   \"response\": {\n\t//     \"$ref\": \"Empty\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/pubsub\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"pubsub.projects.topics.get\":\n\ntype ProjectsTopicsGetCall struct {\n\ts     *Service\n\ttopic string\n\topt_  map[string]interface{}\n}\n\n// Get: Gets the configuration of a topic.\nfunc (r *ProjectsTopicsService) Get(topic string) *ProjectsTopicsGetCall {\n\tc := &ProjectsTopicsGetCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.topic = topic\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsTopicsGetCall) Fields(s ...googleapi.Field) *ProjectsTopicsGetCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsTopicsGetCall) Do() (*Topic, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{+topic}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"topic\": c.topic,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := 
googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Topic\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Gets the configuration of a topic.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"pubsub.projects.topics.get\",\n\t//   \"parameterOrder\": [\n\t//     \"topic\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"topic\": {\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{+topic}\",\n\t//   \"response\": {\n\t//     \"$ref\": \"Topic\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/pubsub\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"pubsub.projects.topics.list\":\n\ntype ProjectsTopicsListCall struct {\n\ts       *Service\n\tproject string\n\topt_    map[string]interface{}\n}\n\n// List: Lists matching topics.\nfunc (r *ProjectsTopicsService) List(project string) *ProjectsTopicsListCall {\n\tc := &ProjectsTopicsListCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.project = project\n\treturn c\n}\n\n// PageSize sets the optional parameter \"pageSize\":\nfunc (c *ProjectsTopicsListCall) PageSize(pageSize int64) *ProjectsTopicsListCall {\n\tc.opt_[\"pageSize\"] = pageSize\n\treturn c\n}\n\n// PageToken sets the optional parameter \"pageToken\":\nfunc (c *ProjectsTopicsListCall) PageToken(pageToken string) *ProjectsTopicsListCall {\n\tc.opt_[\"pageToken\"] = pageToken\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsTopicsListCall) Fields(s ...googleapi.Field) *ProjectsTopicsListCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsTopicsListCall) Do() 
(*ListTopicsResponse, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"pageSize\"]; ok {\n\t\tparams.Set(\"pageSize\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"pageToken\"]; ok {\n\t\tparams.Set(\"pageToken\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{+project}/topics\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"project\": c.project,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ListTopicsResponse\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Lists matching topics.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"pubsub.projects.topics.list\",\n\t//   \"parameterOrder\": [\n\t//     \"project\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"pageSize\": {\n\t//       \"format\": \"int32\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"integer\"\n\t//     },\n\t//     \"pageToken\": {\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"project\": {\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{+project}/topics\",\n\t//   \"response\": {\n\t//     \"$ref\": \"ListTopicsResponse\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/pubsub\"\n\t//   ]\n\t// }\n\n}\n\n// method id 
\"pubsub.projects.topics.publish\":\n\ntype ProjectsTopicsPublishCall struct {\n\ts              *Service\n\ttopic          string\n\tpublishrequest *PublishRequest\n\topt_           map[string]interface{}\n}\n\n// Publish: Adds one or more messages to the topic. Returns NOT_FOUND if\n// the topic does not exist.\nfunc (r *ProjectsTopicsService) Publish(topic string, publishrequest *PublishRequest) *ProjectsTopicsPublishCall {\n\tc := &ProjectsTopicsPublishCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.topic = topic\n\tc.publishrequest = publishrequest\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsTopicsPublishCall) Fields(s ...googleapi.Field) *ProjectsTopicsPublishCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsTopicsPublishCall) Do() (*PublishResponse, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.publishrequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{+topic}:publish\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"topic\": c.topic,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *PublishResponse\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   
\"description\": \"Adds one or more messages to the topic. Returns NOT_FOUND if the topic does not exist.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"pubsub.projects.topics.publish\",\n\t//   \"parameterOrder\": [\n\t//     \"topic\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"topic\": {\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{+topic}:publish\",\n\t//   \"request\": {\n\t//     \"$ref\": \"PublishRequest\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"PublishResponse\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/pubsub\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"pubsub.projects.topics.subscriptions.list\":\n\ntype ProjectsTopicsSubscriptionsListCall struct {\n\ts     *Service\n\ttopic string\n\topt_  map[string]interface{}\n}\n\n// List: Lists the name of the subscriptions for this topic.\nfunc (r *ProjectsTopicsSubscriptionsService) List(topic string) *ProjectsTopicsSubscriptionsListCall {\n\tc := &ProjectsTopicsSubscriptionsListCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.topic = topic\n\treturn c\n}\n\n// PageSize sets the optional parameter \"pageSize\":\nfunc (c *ProjectsTopicsSubscriptionsListCall) PageSize(pageSize int64) *ProjectsTopicsSubscriptionsListCall {\n\tc.opt_[\"pageSize\"] = pageSize\n\treturn c\n}\n\n// PageToken sets the optional parameter \"pageToken\":\nfunc (c *ProjectsTopicsSubscriptionsListCall) PageToken(pageToken string) *ProjectsTopicsSubscriptionsListCall {\n\tc.opt_[\"pageToken\"] = pageToken\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ProjectsTopicsSubscriptionsListCall) Fields(s ...googleapi.Field) *ProjectsTopicsSubscriptionsListCall {\n\tc.opt_[\"fields\"] = 
googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ProjectsTopicsSubscriptionsListCall) Do() (*ListTopicSubscriptionsResponse, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"pageSize\"]; ok {\n\t\tparams.Set(\"pageSize\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"pageToken\"]; ok {\n\t\tparams.Set(\"pageToken\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"{+topic}/subscriptions\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"topic\": c.topic,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ListTopicSubscriptionsResponse\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Lists the name of the subscriptions for this topic.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"pubsub.projects.topics.subscriptions.list\",\n\t//   \"parameterOrder\": [\n\t//     \"topic\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"pageSize\": {\n\t//       \"format\": \"int32\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"integer\"\n\t//     },\n\t//     \"pageToken\": {\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"topic\": {\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"{+topic}/subscriptions\",\n\t//   \"response\": {\n\t//     \"$ref\": \"ListTopicSubscriptionsResponse\"\n\t//   },\n\t//   
\"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/pubsub\"\n\t//   ]\n\t// }\n\n}\n"
  },
  {
    "path": "vendor/google.golang.org/api/storage/v1/storage-api.json",
    "content": "{\n \"kind\": \"discovery#restDescription\",\n \"etag\": \"\\\"ye6orv2F-1npMW3u9suM3a7C5Bo/lTxRjj5-AURGfd9glUYk42wgbOA\\\"\",\n \"discoveryVersion\": \"v1\",\n \"id\": \"storage:v1\",\n \"name\": \"storage\",\n \"version\": \"v1\",\n \"revision\": \"20150326\",\n \"title\": \"Cloud Storage API\",\n \"description\": \"Lets you store and retrieve potentially-large, immutable data objects.\",\n \"ownerDomain\": \"google.com\",\n \"ownerName\": \"Google\",\n \"icons\": {\n  \"x16\": \"https://www.google.com/images/icons/product/cloud_storage-16.png\",\n  \"x32\": \"https://www.google.com/images/icons/product/cloud_storage-32.png\"\n },\n \"documentationLink\": \"https://developers.google.com/storage/docs/json_api/\",\n \"labels\": [\n  \"labs\"\n ],\n \"protocol\": \"rest\",\n \"baseUrl\": \"https://www.googleapis.com/storage/v1/\",\n \"basePath\": \"/storage/v1/\",\n \"rootUrl\": \"https://www.googleapis.com/\",\n \"servicePath\": \"storage/v1/\",\n \"batchPath\": \"batch\",\n \"parameters\": {\n  \"alt\": {\n   \"type\": \"string\",\n   \"description\": \"Data format for the response.\",\n   \"default\": \"json\",\n   \"enum\": [\n    \"json\"\n   ],\n   \"enumDescriptions\": [\n    \"Responses with Content-Type of application/json\"\n   ],\n   \"location\": \"query\"\n  },\n  \"fields\": {\n   \"type\": \"string\",\n   \"description\": \"Selector specifying which fields to include in a partial response.\",\n   \"location\": \"query\"\n  },\n  \"key\": {\n   \"type\": \"string\",\n   \"description\": \"API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.\",\n   \"location\": \"query\"\n  },\n  \"oauth_token\": {\n   \"type\": \"string\",\n   \"description\": \"OAuth 2.0 token for the current user.\",\n   \"location\": \"query\"\n  },\n  \"prettyPrint\": {\n   \"type\": \"boolean\",\n   \"description\": \"Returns response with indentations and line breaks.\",\n   \"default\": \"true\",\n   \"location\": \"query\"\n  },\n  \"quotaUser\": {\n   \"type\": \"string\",\n   \"description\": \"Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.\",\n   \"location\": \"query\"\n  },\n  \"userIp\": {\n   \"type\": \"string\",\n   \"description\": \"IP address of the site where the request originates. Use this if you want to enforce per-user limits.\",\n   \"location\": \"query\"\n  }\n },\n \"auth\": {\n  \"oauth2\": {\n   \"scopes\": {\n    \"https://www.googleapis.com/auth/cloud-platform\": {\n     \"description\": \"View and manage your data across Google Cloud Platform services\"\n    },\n    \"https://www.googleapis.com/auth/devstorage.full_control\": {\n     \"description\": \"Manage your data and permissions in Google Cloud Storage\"\n    },\n    \"https://www.googleapis.com/auth/devstorage.read_only\": {\n     \"description\": \"View your data in Google Cloud Storage\"\n    },\n    \"https://www.googleapis.com/auth/devstorage.read_write\": {\n     \"description\": \"Manage your data in Google Cloud Storage\"\n    }\n   }\n  }\n },\n \"schemas\": {\n  \"Bucket\": {\n   \"id\": \"Bucket\",\n   \"type\": \"object\",\n   \"description\": \"A bucket.\",\n   \"properties\": {\n    \"acl\": {\n     \"type\": \"array\",\n     \"description\": \"Access controls on the bucket.\",\n     \"items\": {\n      \"$ref\": \"BucketAccessControl\"\n     },\n     \"annotations\": {\n      \"required\": [\n       \"storage.buckets.update\"\n      ]\n     
}\n    },\n    \"cors\": {\n     \"type\": \"array\",\n     \"description\": \"The bucket's Cross-Origin Resource Sharing (CORS) configuration.\",\n     \"items\": {\n      \"type\": \"object\",\n      \"properties\": {\n       \"maxAgeSeconds\": {\n        \"type\": \"integer\",\n        \"description\": \"The value, in seconds, to return in the  Access-Control-Max-Age header used in preflight responses.\",\n        \"format\": \"int32\"\n       },\n       \"method\": {\n        \"type\": \"array\",\n        \"description\": \"The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \\\"*\\\" is permitted in the list of methods, and means \\\"any method\\\".\",\n        \"items\": {\n         \"type\": \"string\"\n        }\n       },\n       \"origin\": {\n        \"type\": \"array\",\n        \"description\": \"The list of Origins eligible to receive CORS response headers. Note: \\\"*\\\" is permitted in the list of origins, and means \\\"any Origin\\\".\",\n        \"items\": {\n         \"type\": \"string\"\n        }\n       },\n       \"responseHeader\": {\n        \"type\": \"array\",\n        \"description\": \"The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.\",\n        \"items\": {\n         \"type\": \"string\"\n        }\n       }\n      }\n     }\n    },\n    \"defaultObjectAcl\": {\n     \"type\": \"array\",\n     \"description\": \"Default access controls to apply to new objects when no ACL is provided.\",\n     \"items\": {\n      \"$ref\": \"ObjectAccessControl\"\n     }\n    },\n    \"etag\": {\n     \"type\": \"string\",\n     \"description\": \"HTTP 1.1 Entity tag for the bucket.\"\n    },\n    \"id\": {\n     \"type\": \"string\",\n     \"description\": \"The ID of the bucket.\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The kind of item this is. 
For buckets, this is always storage#bucket.\",\n     \"default\": \"storage#bucket\"\n    },\n    \"lifecycle\": {\n     \"type\": \"object\",\n     \"description\": \"The bucket's lifecycle configuration. See lifecycle management for more information.\",\n     \"properties\": {\n      \"rule\": {\n       \"type\": \"array\",\n       \"description\": \"A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.\",\n       \"items\": {\n        \"type\": \"object\",\n        \"properties\": {\n         \"action\": {\n          \"type\": \"object\",\n          \"description\": \"The action to take.\",\n          \"properties\": {\n           \"type\": {\n            \"type\": \"string\",\n            \"description\": \"Type of the action. Currently, only Delete is supported.\"\n           }\n          }\n         },\n         \"condition\": {\n          \"type\": \"object\",\n          \"description\": \"The condition(s) under which the action will be taken.\",\n          \"properties\": {\n           \"age\": {\n            \"type\": \"integer\",\n            \"description\": \"Age of an object (in days). This condition is satisfied when an object reaches the specified age.\",\n            \"format\": \"int32\"\n           },\n           \"createdBefore\": {\n            \"type\": \"string\",\n            \"description\": \"A date in RFC 3339 format with only the date part (for instance, \\\"2013-01-15\\\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.\",\n            \"format\": \"date\"\n           },\n           \"isLive\": {\n            \"type\": \"boolean\",\n            \"description\": \"Relevant only for versioned objects. 
If the value is true, this condition matches live objects; if the value is false, it matches archived objects.\"\n           },\n           \"numNewerVersions\": {\n            \"type\": \"integer\",\n            \"description\": \"Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.\",\n            \"format\": \"int32\"\n           }\n          }\n         }\n        }\n       }\n      }\n     }\n    },\n    \"location\": {\n     \"type\": \"string\",\n     \"description\": \"The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list.\"\n    },\n    \"logging\": {\n     \"type\": \"object\",\n     \"description\": \"The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.\",\n     \"properties\": {\n      \"logBucket\": {\n       \"type\": \"string\",\n       \"description\": \"The destination bucket where the current bucket's logs should be placed.\"\n      },\n      \"logObjectPrefix\": {\n       \"type\": \"string\",\n       \"description\": \"A prefix for log object names.\"\n      }\n     }\n    },\n    \"metageneration\": {\n     \"type\": \"string\",\n     \"description\": \"The metadata generation of this bucket.\",\n     \"format\": \"int64\"\n    },\n    \"name\": {\n     \"type\": \"string\",\n     \"description\": \"The name of the bucket.\",\n     \"annotations\": {\n      \"required\": [\n       \"storage.buckets.insert\"\n      ]\n     }\n    },\n    \"owner\": {\n     \"type\": \"object\",\n     \"description\": \"The owner of the bucket. 
This is always the project team's owner group.\",\n     \"properties\": {\n      \"entity\": {\n       \"type\": \"string\",\n       \"description\": \"The entity, in the form project-owner-projectId.\"\n      },\n      \"entityId\": {\n       \"type\": \"string\",\n       \"description\": \"The ID for the entity.\"\n      }\n     }\n    },\n    \"projectNumber\": {\n     \"type\": \"string\",\n     \"description\": \"The project number of the project the bucket belongs to.\",\n     \"format\": \"uint64\"\n    },\n    \"selfLink\": {\n     \"type\": \"string\",\n     \"description\": \"The URI of this bucket.\"\n    },\n    \"storageClass\": {\n     \"type\": \"string\",\n     \"description\": \"The bucket's storage class. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. Defaults to STANDARD. For more information, see storage classes.\"\n    },\n    \"timeCreated\": {\n     \"type\": \"string\",\n     \"description\": \"Creation time of the bucket in RFC 3339 format.\",\n     \"format\": \"date-time\"\n    },\n    \"versioning\": {\n     \"type\": \"object\",\n     \"description\": \"The bucket's versioning configuration.\",\n     \"properties\": {\n      \"enabled\": {\n       \"type\": \"boolean\",\n       \"description\": \"While set to true, versioning is fully enabled for this bucket.\"\n      }\n     }\n    },\n    \"website\": {\n     \"type\": \"object\",\n     \"description\": \"The bucket's website configuration.\",\n     \"properties\": {\n      \"mainPageSuffix\": {\n       \"type\": \"string\",\n       \"description\": \"Behaves as the bucket's directory index where missing objects are treated as potential directories.\"\n      },\n      \"notFoundPage\": {\n       \"type\": \"string\",\n       \"description\": \"The custom object to return when a requested resource is not found.\"\n      }\n     }\n    }\n   }\n  },\n  
\"BucketAccessControl\": {\n   \"id\": \"BucketAccessControl\",\n   \"type\": \"object\",\n   \"description\": \"An access-control entry.\",\n   \"properties\": {\n    \"bucket\": {\n     \"type\": \"string\",\n     \"description\": \"The name of the bucket.\"\n    },\n    \"domain\": {\n     \"type\": \"string\",\n     \"description\": \"The domain associated with the entity, if any.\"\n    },\n    \"email\": {\n     \"type\": \"string\",\n     \"description\": \"The email address associated with the entity, if any.\"\n    },\n    \"entity\": {\n     \"type\": \"string\",\n     \"description\": \"The entity holding the permission, in one of the following forms: \\n- user-userId \\n- user-email \\n- group-groupId \\n- group-email \\n- domain-domain \\n- project-team-projectId \\n- allUsers \\n- allAuthenticatedUsers Examples: \\n- The user liz@example.com would be user-liz@example.com. \\n- The group example@googlegroups.com would be group-example@googlegroups.com. \\n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.\",\n     \"annotations\": {\n      \"required\": [\n       \"storage.bucketAccessControls.insert\"\n      ]\n     }\n    },\n    \"entityId\": {\n     \"type\": \"string\",\n     \"description\": \"The ID for the entity, if any.\"\n    },\n    \"etag\": {\n     \"type\": \"string\",\n     \"description\": \"HTTP 1.1 Entity tag for the access-control entry.\"\n    },\n    \"id\": {\n     \"type\": \"string\",\n     \"description\": \"The ID of the access-control entry.\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The kind of item this is. 
For bucket access control entries, this is always storage#bucketAccessControl.\",\n     \"default\": \"storage#bucketAccessControl\"\n    },\n    \"projectTeam\": {\n     \"type\": \"object\",\n     \"description\": \"The project team associated with the entity, if any.\",\n     \"properties\": {\n      \"projectNumber\": {\n       \"type\": \"string\",\n       \"description\": \"The project number.\"\n      },\n      \"team\": {\n       \"type\": \"string\",\n       \"description\": \"The team. Can be owners, editors, or viewers.\"\n      }\n     }\n    },\n    \"role\": {\n     \"type\": \"string\",\n     \"description\": \"The access permission for the entity. Can be READER, WRITER, or OWNER.\",\n     \"annotations\": {\n      \"required\": [\n       \"storage.bucketAccessControls.insert\"\n      ]\n     }\n    },\n    \"selfLink\": {\n     \"type\": \"string\",\n     \"description\": \"The link to this access-control entry.\"\n    }\n   }\n  },\n  \"BucketAccessControls\": {\n   \"id\": \"BucketAccessControls\",\n   \"type\": \"object\",\n   \"description\": \"An access-control list.\",\n   \"properties\": {\n    \"items\": {\n     \"type\": \"array\",\n     \"description\": \"The list of items.\",\n     \"items\": {\n      \"$ref\": \"BucketAccessControl\"\n     }\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.\",\n     \"default\": \"storage#bucketAccessControls\"\n    }\n   }\n  },\n  \"Buckets\": {\n   \"id\": \"Buckets\",\n   \"type\": \"object\",\n   \"description\": \"A list of buckets.\",\n   \"properties\": {\n    \"items\": {\n     \"type\": \"array\",\n     \"description\": \"The list of items.\",\n     \"items\": {\n      \"$ref\": \"Bucket\"\n     }\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The kind of item this is. 
For lists of buckets, this is always storage#buckets.\",\n     \"default\": \"storage#buckets\"\n    },\n    \"nextPageToken\": {\n     \"type\": \"string\",\n     \"description\": \"The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.\"\n    }\n   }\n  },\n  \"Channel\": {\n   \"id\": \"Channel\",\n   \"type\": \"object\",\n   \"description\": \"An notification channel used to watch for resource changes.\",\n   \"properties\": {\n    \"address\": {\n     \"type\": \"string\",\n     \"description\": \"The address where notifications are delivered for this channel.\"\n    },\n    \"expiration\": {\n     \"type\": \"string\",\n     \"description\": \"Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.\",\n     \"format\": \"int64\"\n    },\n    \"id\": {\n     \"type\": \"string\",\n     \"description\": \"A UUID or similar unique string that identifies this channel.\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \\\"api#channel\\\".\",\n     \"default\": \"api#channel\"\n    },\n    \"params\": {\n     \"type\": \"object\",\n     \"description\": \"Additional parameters controlling delivery channel behavior. Optional.\",\n     \"additionalProperties\": {\n      \"type\": \"string\",\n      \"description\": \"Declares a new parameter by name.\"\n     }\n    },\n    \"payload\": {\n     \"type\": \"boolean\",\n     \"description\": \"A Boolean value to indicate whether payload is wanted. Optional.\"\n    },\n    \"resourceId\": {\n     \"type\": \"string\",\n     \"description\": \"An opaque ID that identifies the resource being watched on this channel. 
Stable across different API versions.\"\n    },\n    \"resourceUri\": {\n     \"type\": \"string\",\n     \"description\": \"A version-specific identifier for the watched resource.\"\n    },\n    \"token\": {\n     \"type\": \"string\",\n     \"description\": \"An arbitrary string delivered to the target address with each notification delivered over this channel. Optional.\"\n    },\n    \"type\": {\n     \"type\": \"string\",\n     \"description\": \"The type of delivery mechanism used for this channel.\"\n    }\n   }\n  },\n  \"ComposeRequest\": {\n   \"id\": \"ComposeRequest\",\n   \"type\": \"object\",\n   \"description\": \"A Compose request.\",\n   \"properties\": {\n    \"destination\": {\n     \"$ref\": \"Object\",\n     \"description\": \"Properties of the resulting object.\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The kind of item this is.\",\n     \"default\": \"storage#composeRequest\"\n    },\n    \"sourceObjects\": {\n     \"type\": \"array\",\n     \"description\": \"The list of source objects that will be concatenated into a single object.\",\n     \"items\": {\n      \"type\": \"object\",\n      \"properties\": {\n       \"generation\": {\n        \"type\": \"string\",\n        \"description\": \"The generation of this object to use as the source.\",\n        \"format\": \"int64\"\n       },\n       \"name\": {\n        \"type\": \"string\",\n        \"description\": \"The source object's name. 
The source object's bucket is implicitly the destination bucket.\",\n        \"annotations\": {\n         \"required\": [\n          \"storage.objects.compose\"\n         ]\n        }\n       },\n       \"objectPreconditions\": {\n        \"type\": \"object\",\n        \"description\": \"Conditions that must be met for this operation to execute.\",\n        \"properties\": {\n         \"ifGenerationMatch\": {\n          \"type\": \"string\",\n          \"description\": \"Only perform the composition if the generation of the source object that would be used matches this value. If this value and a generation are both specified, they must be the same value or the call will fail.\",\n          \"format\": \"int64\"\n         }\n        }\n       }\n      }\n     },\n     \"annotations\": {\n      \"required\": [\n       \"storage.objects.compose\"\n      ]\n     }\n    }\n   }\n  },\n  \"Object\": {\n   \"id\": \"Object\",\n   \"type\": \"object\",\n   \"description\": \"An object.\",\n   \"properties\": {\n    \"acl\": {\n     \"type\": \"array\",\n     \"description\": \"Access controls on the object.\",\n     \"items\": {\n      \"$ref\": \"ObjectAccessControl\"\n     },\n     \"annotations\": {\n      \"required\": [\n       \"storage.objects.update\"\n      ]\n     }\n    },\n    \"bucket\": {\n     \"type\": \"string\",\n     \"description\": \"The name of the bucket containing this object.\"\n    },\n    \"cacheControl\": {\n     \"type\": \"string\",\n     \"description\": \"Cache-Control directive for the object data.\"\n    },\n    \"componentCount\": {\n     \"type\": \"integer\",\n     \"description\": \"Number of underlying components that make up this object. 
Components are accumulated by compose operations.\",\n     \"format\": \"int32\"\n    },\n    \"contentDisposition\": {\n     \"type\": \"string\",\n     \"description\": \"Content-Disposition of the object data.\"\n    },\n    \"contentEncoding\": {\n     \"type\": \"string\",\n     \"description\": \"Content-Encoding of the object data.\"\n    },\n    \"contentLanguage\": {\n     \"type\": \"string\",\n     \"description\": \"Content-Language of the object data.\"\n    },\n    \"contentType\": {\n     \"type\": \"string\",\n     \"description\": \"Content-Type of the object data.\",\n     \"annotations\": {\n      \"required\": [\n       \"storage.objects.update\"\n      ]\n     }\n    },\n    \"crc32c\": {\n     \"type\": \"string\",\n     \"description\": \"CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order.\"\n    },\n    \"etag\": {\n     \"type\": \"string\",\n     \"description\": \"HTTP 1.1 Entity tag for the object.\"\n    },\n    \"generation\": {\n     \"type\": \"string\",\n     \"description\": \"The content generation of this object. Used for object versioning.\",\n     \"format\": \"int64\"\n    },\n    \"id\": {\n     \"type\": \"string\",\n     \"description\": \"The ID of the object.\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The kind of item this is. 
For objects, this is always storage#object.\",\n     \"default\": \"storage#object\"\n    },\n    \"md5Hash\": {\n     \"type\": \"string\",\n     \"description\": \"MD5 hash of the data; encoded using base64.\"\n    },\n    \"mediaLink\": {\n     \"type\": \"string\",\n     \"description\": \"Media download link.\"\n    },\n    \"metadata\": {\n     \"type\": \"object\",\n     \"description\": \"User-provided metadata, in key/value pairs.\",\n     \"additionalProperties\": {\n      \"type\": \"string\",\n      \"description\": \"An individual metadata entry.\"\n     }\n    },\n    \"metageneration\": {\n     \"type\": \"string\",\n     \"description\": \"The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.\",\n     \"format\": \"int64\"\n    },\n    \"name\": {\n     \"type\": \"string\",\n     \"description\": \"The name of this object. Required if not specified by URL parameter.\"\n    },\n    \"owner\": {\n     \"type\": \"object\",\n     \"description\": \"The owner of the object. 
This will always be the uploader of the object.\",\n     \"properties\": {\n      \"entity\": {\n       \"type\": \"string\",\n       \"description\": \"The entity, in the form user-userId.\"\n      },\n      \"entityId\": {\n       \"type\": \"string\",\n       \"description\": \"The ID for the entity.\"\n      }\n     }\n    },\n    \"selfLink\": {\n     \"type\": \"string\",\n     \"description\": \"The link to this object.\"\n    },\n    \"size\": {\n     \"type\": \"string\",\n     \"description\": \"Content-Length of the data in bytes.\",\n     \"format\": \"uint64\"\n    },\n    \"storageClass\": {\n     \"type\": \"string\",\n     \"description\": \"Storage class of the object.\"\n    },\n    \"timeDeleted\": {\n     \"type\": \"string\",\n     \"description\": \"The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.\",\n     \"format\": \"date-time\"\n    },\n    \"updated\": {\n     \"type\": \"string\",\n     \"description\": \"The creation or modification time of the object in RFC 3339 format. 
For buckets with versioning enabled, changing an object's metadata does not change this property.\",\n     \"format\": \"date-time\"\n    }\n   }\n  },\n  \"ObjectAccessControl\": {\n   \"id\": \"ObjectAccessControl\",\n   \"type\": \"object\",\n   \"description\": \"An access-control entry.\",\n   \"properties\": {\n    \"bucket\": {\n     \"type\": \"string\",\n     \"description\": \"The name of the bucket.\"\n    },\n    \"domain\": {\n     \"type\": \"string\",\n     \"description\": \"The domain associated with the entity, if any.\"\n    },\n    \"email\": {\n     \"type\": \"string\",\n     \"description\": \"The email address associated with the entity, if any.\"\n    },\n    \"entity\": {\n     \"type\": \"string\",\n     \"description\": \"The entity holding the permission, in one of the following forms: \\n- user-userId \\n- user-email \\n- group-groupId \\n- group-email \\n- domain-domain \\n- project-team-projectId \\n- allUsers \\n- allAuthenticatedUsers Examples: \\n- The user liz@example.com would be user-liz@example.com. \\n- The group example@googlegroups.com would be group-example@googlegroups.com. \\n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.\"\n    },\n    \"entityId\": {\n     \"type\": \"string\",\n     \"description\": \"The ID for the entity, if any.\"\n    },\n    \"etag\": {\n     \"type\": \"string\",\n     \"description\": \"HTTP 1.1 Entity tag for the access-control entry.\"\n    },\n    \"generation\": {\n     \"type\": \"string\",\n     \"description\": \"The content generation of the object.\",\n     \"format\": \"int64\"\n    },\n    \"id\": {\n     \"type\": \"string\",\n     \"description\": \"The ID of the access-control entry.\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The kind of item this is. 
For object access control entries, this is always storage#objectAccessControl.\",\n     \"default\": \"storage#objectAccessControl\"\n    },\n    \"object\": {\n     \"type\": \"string\",\n     \"description\": \"The name of the object.\"\n    },\n    \"projectTeam\": {\n     \"type\": \"object\",\n     \"description\": \"The project team associated with the entity, if any.\",\n     \"properties\": {\n      \"projectNumber\": {\n       \"type\": \"string\",\n       \"description\": \"The project number.\"\n      },\n      \"team\": {\n       \"type\": \"string\",\n       \"description\": \"The team. Can be owners, editors, or viewers.\"\n      }\n     }\n    },\n    \"role\": {\n     \"type\": \"string\",\n     \"description\": \"The access permission for the entity. Can be READER or OWNER.\"\n    },\n    \"selfLink\": {\n     \"type\": \"string\",\n     \"description\": \"The link to this access-control entry.\"\n    }\n   }\n  },\n  \"ObjectAccessControls\": {\n   \"id\": \"ObjectAccessControls\",\n   \"type\": \"object\",\n   \"description\": \"An access-control list.\",\n   \"properties\": {\n    \"items\": {\n     \"type\": \"array\",\n     \"description\": \"The list of items.\",\n     \"items\": {\n      \"type\": \"any\"\n     }\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls.\",\n     \"default\": \"storage#objectAccessControls\"\n    }\n   }\n  },\n  \"Objects\": {\n   \"id\": \"Objects\",\n   \"type\": \"object\",\n   \"description\": \"A list of objects.\",\n   \"properties\": {\n    \"items\": {\n     \"type\": \"array\",\n     \"description\": \"The list of items.\",\n     \"items\": {\n      \"$ref\": \"Object\"\n     }\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The kind of item this is. 
For lists of objects, this is always storage#objects.\",\n     \"default\": \"storage#objects\"\n    },\n    \"nextPageToken\": {\n     \"type\": \"string\",\n     \"description\": \"The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.\"\n    },\n    \"prefixes\": {\n     \"type\": \"array\",\n     \"description\": \"The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.\",\n     \"items\": {\n      \"type\": \"string\"\n     }\n    }\n   }\n  },\n  \"RewriteResponse\": {\n   \"id\": \"RewriteResponse\",\n   \"type\": \"object\",\n   \"description\": \"A rewrite response.\",\n   \"properties\": {\n    \"done\": {\n     \"type\": \"boolean\",\n     \"description\": \"true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response.\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"description\": \"The kind of item this is.\",\n     \"default\": \"storage#rewriteResponse\"\n    },\n    \"objectSize\": {\n     \"type\": \"string\",\n     \"description\": \"The total size of the object being copied in bytes. This property is always present in the response.\",\n     \"format\": \"uint64\"\n    },\n    \"resource\": {\n     \"$ref\": \"Object\",\n     \"description\": \"A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes.\"\n    },\n    \"rewriteToken\": {\n     \"type\": \"string\",\n     \"description\": \"A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy.\"\n    },\n    \"totalBytesRewritten\": {\n     \"type\": \"string\",\n     \"description\": \"The total bytes written so far, which can be used to provide a waiting user with a progress indicator. 
This property is always present in the response.\",\n     \"format\": \"uint64\"\n    }\n   }\n  }\n },\n \"resources\": {\n  \"bucketAccessControls\": {\n   \"methods\": {\n    \"delete\": {\n     \"id\": \"storage.bucketAccessControls.delete\",\n     \"path\": \"b/{bucket}/acl/{entity}\",\n     \"httpMethod\": \"DELETE\",\n     \"description\": \"Permanently deletes the ACL entry for the specified entity on the specified bucket.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"entity\": {\n       \"type\": \"string\",\n       \"description\": \"The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"entity\"\n     ],\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    },\n    \"get\": {\n     \"id\": \"storage.bucketAccessControls.get\",\n     \"path\": \"b/{bucket}/acl/{entity}\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Returns the ACL entry for the specified entity on the specified bucket.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"entity\": {\n       \"type\": \"string\",\n       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"entity\"\n     ],\n     \"response\": {\n      \"$ref\": \"BucketAccessControl\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    },\n    \"insert\": {\n     \"id\": \"storage.bucketAccessControls.insert\",\n     \"path\": \"b/{bucket}/acl\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Creates a new ACL entry on the specified bucket.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\"\n     ],\n     \"request\": {\n      \"$ref\": \"BucketAccessControl\"\n     },\n     \"response\": {\n      \"$ref\": \"BucketAccessControl\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    },\n    \"list\": {\n     \"id\": \"storage.bucketAccessControls.list\",\n     \"path\": \"b/{bucket}/acl\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Retrieves ACL entries on the specified bucket.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\"\n     ],\n     \"response\": {\n      \"$ref\": \"BucketAccessControls\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    },\n    \"patch\": {\n     \"id\": \"storage.bucketAccessControls.patch\",\n     \"path\": \"b/{bucket}/acl/{entity}\",\n     \"httpMethod\": \"PATCH\",\n     \"description\": \"Updates an ACL entry 
on the specified bucket. This method supports patch semantics.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"entity\": {\n       \"type\": \"string\",\n       \"description\": \"The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"entity\"\n     ],\n     \"request\": {\n      \"$ref\": \"BucketAccessControl\"\n     },\n     \"response\": {\n      \"$ref\": \"BucketAccessControl\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    },\n    \"update\": {\n     \"id\": \"storage.bucketAccessControls.update\",\n     \"path\": \"b/{bucket}/acl/{entity}\",\n     \"httpMethod\": \"PUT\",\n     \"description\": \"Updates an ACL entry on the specified bucket.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"entity\": {\n       \"type\": \"string\",\n       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"entity\"\n     ],\n     \"request\": {\n      \"$ref\": \"BucketAccessControl\"\n     },\n     \"response\": {\n      \"$ref\": \"BucketAccessControl\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    }\n   }\n  },\n  \"buckets\": {\n   \"methods\": {\n    \"delete\": {\n     \"id\": \"storage.buckets.delete\",\n     \"path\": \"b/{bucket}\",\n     \"httpMethod\": \"DELETE\",\n     \"description\": \"Permanently deletes an empty bucket.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"ifMetagenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"If set, only deletes the bucket if its metageneration matches this value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"If set, only deletes the bucket if its metageneration does not match this value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\"\n     ],\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ]\n    },\n    \"get\": {\n     \"id\": \"storage.buckets.get\",\n     \"path\": \"b/{bucket}\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Returns metadata for the specified bucket.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n      
 \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"ifMetagenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"projection\": {\n       \"type\": \"string\",\n       \"description\": \"Set of properties to return. Defaults to noAcl.\",\n       \"enum\": [\n        \"full\",\n        \"noAcl\"\n       ],\n       \"enumDescriptions\": [\n        \"Include all properties.\",\n        \"Omit acl and defaultObjectAcl properties.\"\n       ],\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\"\n     ],\n     \"response\": {\n      \"$ref\": \"Bucket\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_only\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ]\n    },\n    \"insert\": {\n     \"id\": \"storage.buckets.insert\",\n     \"path\": \"b\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Creates a new bucket.\",\n     \"parameters\": {\n      \"predefinedAcl\": {\n       \"type\": \"string\",\n       \"description\": \"Apply a predefined set of access controls to this bucket.\",\n       \"enum\": [\n        \"authenticatedRead\",\n        \"private\",\n        \"projectPrivate\",\n        \"publicRead\",\n        \"publicReadWrite\"\n     
  ],\n       \"enumDescriptions\": [\n        \"Project team owners get OWNER access, and allAuthenticatedUsers get READER access.\",\n        \"Project team owners get OWNER access.\",\n        \"Project team members get access according to their roles.\",\n        \"Project team owners get OWNER access, and allUsers get READER access.\",\n        \"Project team owners get OWNER access, and allUsers get WRITER access.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"predefinedDefaultObjectAcl\": {\n       \"type\": \"string\",\n       \"description\": \"Apply a predefined set of default object access controls to this bucket.\",\n       \"enum\": [\n        \"authenticatedRead\",\n        \"bucketOwnerFullControl\",\n        \"bucketOwnerRead\",\n        \"private\",\n        \"projectPrivate\",\n        \"publicRead\"\n       ],\n       \"enumDescriptions\": [\n        \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n        \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n        \"Object owner gets OWNER access, and project team owners get READER access.\",\n        \"Object owner gets OWNER access.\",\n        \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n        \"Object owner gets OWNER access, and allUsers get READER access.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"project\": {\n       \"type\": \"string\",\n       \"description\": \"A valid API project identifier.\",\n       \"required\": true,\n       \"location\": \"query\"\n      },\n      \"projection\": {\n       \"type\": \"string\",\n       \"description\": \"Set of properties to return. 
Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.\",\n       \"enum\": [\n        \"full\",\n        \"noAcl\"\n       ],\n       \"enumDescriptions\": [\n        \"Include all properties.\",\n        \"Omit acl and defaultObjectAcl properties.\"\n       ],\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"project\"\n     ],\n     \"request\": {\n      \"$ref\": \"Bucket\"\n     },\n     \"response\": {\n      \"$ref\": \"Bucket\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ]\n    },\n    \"list\": {\n     \"id\": \"storage.buckets.list\",\n     \"path\": \"b\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Retrieves a list of buckets for a given project.\",\n     \"parameters\": {\n      \"maxResults\": {\n       \"type\": \"integer\",\n       \"description\": \"Maximum number of buckets to return.\",\n       \"format\": \"uint32\",\n       \"minimum\": \"0\",\n       \"location\": \"query\"\n      },\n      \"pageToken\": {\n       \"type\": \"string\",\n       \"description\": \"A previously-returned page token representing part of the larger set of results to view.\",\n       \"location\": \"query\"\n      },\n      \"prefix\": {\n       \"type\": \"string\",\n       \"description\": \"Filter results to buckets whose names begin with this prefix.\",\n       \"location\": \"query\"\n      },\n      \"project\": {\n       \"type\": \"string\",\n       \"description\": \"A valid API project identifier.\",\n       \"required\": true,\n       \"location\": \"query\"\n      },\n      \"projection\": {\n       \"type\": \"string\",\n       \"description\": \"Set of properties to return. 
Defaults to noAcl.\",\n       \"enum\": [\n        \"full\",\n        \"noAcl\"\n       ],\n       \"enumDescriptions\": [\n        \"Include all properties.\",\n        \"Omit acl and defaultObjectAcl properties.\"\n       ],\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"project\"\n     ],\n     \"response\": {\n      \"$ref\": \"Buckets\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_only\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ]\n    },\n    \"patch\": {\n     \"id\": \"storage.buckets.patch\",\n     \"path\": \"b/{bucket}\",\n     \"httpMethod\": \"PATCH\",\n     \"description\": \"Updates a bucket. This method supports patch semantics.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"ifMetagenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"predefinedAcl\": {\n       \"type\": \"string\",\n       \"description\": \"Apply a predefined set of access controls to this bucket.\",\n       \"enum\": [\n        \"authenticatedRead\",\n        \"private\",\n        \"projectPrivate\",\n        \"publicRead\",\n        \"publicReadWrite\"\n       ],\n   
    \"enumDescriptions\": [\n        \"Project team owners get OWNER access, and allAuthenticatedUsers get READER access.\",\n        \"Project team owners get OWNER access.\",\n        \"Project team members get access according to their roles.\",\n        \"Project team owners get OWNER access, and allUsers get READER access.\",\n        \"Project team owners get OWNER access, and allUsers get WRITER access.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"predefinedDefaultObjectAcl\": {\n       \"type\": \"string\",\n       \"description\": \"Apply a predefined set of default object access controls to this bucket.\",\n       \"enum\": [\n        \"authenticatedRead\",\n        \"bucketOwnerFullControl\",\n        \"bucketOwnerRead\",\n        \"private\",\n        \"projectPrivate\",\n        \"publicRead\"\n       ],\n       \"enumDescriptions\": [\n        \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n        \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n        \"Object owner gets OWNER access, and project team owners get READER access.\",\n        \"Object owner gets OWNER access.\",\n        \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n        \"Object owner gets OWNER access, and allUsers get READER access.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"projection\": {\n       \"type\": \"string\",\n       \"description\": \"Set of properties to return. 
Defaults to full.\",\n       \"enum\": [\n        \"full\",\n        \"noAcl\"\n       ],\n       \"enumDescriptions\": [\n        \"Include all properties.\",\n        \"Omit acl and defaultObjectAcl properties.\"\n       ],\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\"\n     ],\n     \"request\": {\n      \"$ref\": \"Bucket\"\n     },\n     \"response\": {\n      \"$ref\": \"Bucket\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ]\n    },\n    \"update\": {\n     \"id\": \"storage.buckets.update\",\n     \"path\": \"b/{bucket}\",\n     \"httpMethod\": \"PUT\",\n     \"description\": \"Updates a bucket.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"ifMetagenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"predefinedAcl\": {\n       \"type\": \"string\",\n       \"description\": \"Apply a predefined set of access controls to this bucket.\",\n       \"enum\": [\n        \"authenticatedRead\",\n        \"private\",\n        \"projectPrivate\",\n        \"publicRead\",\n        \"publicReadWrite\"\n       ],\n       \"enumDescriptions\": [\n        \"Project 
team owners get OWNER access, and allAuthenticatedUsers get READER access.\",\n        \"Project team owners get OWNER access.\",\n        \"Project team members get access according to their roles.\",\n        \"Project team owners get OWNER access, and allUsers get READER access.\",\n        \"Project team owners get OWNER access, and allUsers get WRITER access.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"predefinedDefaultObjectAcl\": {\n       \"type\": \"string\",\n       \"description\": \"Apply a predefined set of default object access controls to this bucket.\",\n       \"enum\": [\n        \"authenticatedRead\",\n        \"bucketOwnerFullControl\",\n        \"bucketOwnerRead\",\n        \"private\",\n        \"projectPrivate\",\n        \"publicRead\"\n       ],\n       \"enumDescriptions\": [\n        \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n        \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n        \"Object owner gets OWNER access, and project team owners get READER access.\",\n        \"Object owner gets OWNER access.\",\n        \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n        \"Object owner gets OWNER access, and allUsers get READER access.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"projection\": {\n       \"type\": \"string\",\n       \"description\": \"Set of properties to return. 
Defaults to full.\",\n       \"enum\": [\n        \"full\",\n        \"noAcl\"\n       ],\n       \"enumDescriptions\": [\n        \"Include all properties.\",\n        \"Omit acl and defaultObjectAcl properties.\"\n       ],\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\"\n     ],\n     \"request\": {\n      \"$ref\": \"Bucket\"\n     },\n     \"response\": {\n      \"$ref\": \"Bucket\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ]\n    }\n   }\n  },\n  \"channels\": {\n   \"methods\": {\n    \"stop\": {\n     \"id\": \"storage.channels.stop\",\n     \"path\": \"channels/stop\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Stop watching resources through this channel\",\n     \"request\": {\n      \"$ref\": \"Channel\",\n      \"parameterName\": \"resource\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_only\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ]\n    }\n   }\n  },\n  \"defaultObjectAccessControls\": {\n   \"methods\": {\n    \"delete\": {\n     \"id\": \"storage.defaultObjectAccessControls.delete\",\n     \"path\": \"b/{bucket}/defaultObjectAcl/{entity}\",\n     \"httpMethod\": \"DELETE\",\n     \"description\": \"Permanently deletes the default object ACL entry for the specified entity on the specified bucket.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"entity\": {\n       \"type\": \"string\",\n       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"entity\"\n     ],\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    },\n    \"get\": {\n     \"id\": \"storage.defaultObjectAccessControls.get\",\n     \"path\": \"b/{bucket}/defaultObjectAcl/{entity}\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Returns the default object ACL entry for the specified entity on the specified bucket.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"entity\": {\n       \"type\": \"string\",\n       \"description\": \"The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"entity\"\n     ],\n     \"response\": {\n      \"$ref\": \"ObjectAccessControl\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    },\n    \"insert\": {\n     \"id\": \"storage.defaultObjectAccessControls.insert\",\n     \"path\": \"b/{bucket}/defaultObjectAcl\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Creates a new default object ACL entry on the specified bucket.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\"\n     ],\n     \"request\": {\n      \"$ref\": \"ObjectAccessControl\"\n     },\n     \"response\": {\n      
\"$ref\": \"ObjectAccessControl\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    },\n    \"list\": {\n     \"id\": \"storage.defaultObjectAccessControls.list\",\n     \"path\": \"b/{bucket}/defaultObjectAcl\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Retrieves default object ACL entries on the specified bucket.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"ifMetagenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"If present, only return default ACL listing if the bucket's current metageneration matches this value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"If present, only return default ACL listing if the bucket's current metageneration does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\"\n     ],\n     \"response\": {\n      \"$ref\": \"ObjectAccessControls\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    },\n    \"patch\": {\n     \"id\": \"storage.defaultObjectAccessControls.patch\",\n     \"path\": \"b/{bucket}/defaultObjectAcl/{entity}\",\n     \"httpMethod\": \"PATCH\",\n     \"description\": \"Updates a default object ACL entry on the specified bucket. This method supports patch semantics.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"entity\": {\n       \"type\": \"string\",\n       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"entity\"\n     ],\n     \"request\": {\n      \"$ref\": \"ObjectAccessControl\"\n     },\n     \"response\": {\n      \"$ref\": \"ObjectAccessControl\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    },\n    \"update\": {\n     \"id\": \"storage.defaultObjectAccessControls.update\",\n     \"path\": \"b/{bucket}/defaultObjectAcl/{entity}\",\n     \"httpMethod\": \"PUT\",\n     \"description\": \"Updates a default object ACL entry on the specified bucket.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"entity\": {\n       \"type\": \"string\",\n       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"entity\"\n     ],\n     \"request\": {\n      \"$ref\": \"ObjectAccessControl\"\n     },\n     \"response\": {\n      \"$ref\": \"ObjectAccessControl\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    }\n   }\n  },\n  \"objectAccessControls\": {\n   \"methods\": {\n    \"delete\": {\n     \"id\": \"storage.objectAccessControls.delete\",\n     \"path\": \"b/{bucket}/o/{object}/acl/{entity}\",\n     \"httpMethod\": \"DELETE\",\n     \"description\": \"Permanently deletes the ACL entry for the specified entity on the specified object.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"entity\": {\n       \"type\": \"string\",\n       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"generation\": {\n       \"type\": \"string\",\n       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"object\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"object\",\n      \"entity\"\n     ],\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    },\n    \"get\": {\n     \"id\": \"storage.objectAccessControls.get\",\n     \"path\": \"b/{bucket}/o/{object}/acl/{entity}\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Returns the ACL entry for the specified entity on the specified object.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"entity\": {\n       \"type\": \"string\",\n       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"generation\": {\n       \"type\": \"string\",\n       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"object\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"object\",\n      \"entity\"\n     ],\n     \"response\": {\n      \"$ref\": \"ObjectAccessControl\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    },\n    \"insert\": {\n     \"id\": \"storage.objectAccessControls.insert\",\n     \"path\": \"b/{bucket}/o/{object}/acl\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Creates a new ACL entry on the specified object.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"generation\": {\n       \"type\": \"string\",\n       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"object\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"object\"\n     ],\n     \"request\": {\n      \"$ref\": \"ObjectAccessControl\"\n     },\n     \"response\": {\n      \"$ref\": \"ObjectAccessControl\"\n     },\n     \"scopes\": [\n      
\"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    },\n    \"list\": {\n     \"id\": \"storage.objectAccessControls.list\",\n     \"path\": \"b/{bucket}/o/{object}/acl\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Retrieves ACL entries on the specified object.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"generation\": {\n       \"type\": \"string\",\n       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"object\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"object\"\n     ],\n     \"response\": {\n      \"$ref\": \"ObjectAccessControls\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    },\n    \"patch\": {\n     \"id\": \"storage.objectAccessControls.patch\",\n     \"path\": \"b/{bucket}/o/{object}/acl/{entity}\",\n     \"httpMethod\": \"PATCH\",\n     \"description\": \"Updates an ACL entry on the specified object. This method supports patch semantics.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"entity\": {\n       \"type\": \"string\",\n       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"generation\": {\n       \"type\": \"string\",\n       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"object\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"object\",\n      \"entity\"\n     ],\n     \"request\": {\n      \"$ref\": \"ObjectAccessControl\"\n     },\n     \"response\": {\n      \"$ref\": \"ObjectAccessControl\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    },\n    \"update\": {\n     \"id\": \"storage.objectAccessControls.update\",\n     \"path\": \"b/{bucket}/o/{object}/acl/{entity}\",\n     \"httpMethod\": \"PUT\",\n     \"description\": \"Updates an ACL entry on the specified object.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of a bucket.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"entity\": {\n       \"type\": \"string\",\n       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"generation\": {\n       \"type\": \"string\",\n       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"object\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"object\",\n      \"entity\"\n     ],\n     \"request\": {\n      \"$ref\": \"ObjectAccessControl\"\n     },\n     \"response\": {\n      \"$ref\": \"ObjectAccessControl\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/devstorage.full_control\"\n     ]\n    }\n   }\n  },\n  \"objects\": {\n   \"methods\": {\n    \"compose\": {\n     \"id\": \"storage.objects.compose\",\n     \"path\": \"b/{destinationBucket}/o/{destinationObject}/compose\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Concatenates a list of existing objects into a new object in the same bucket.\",\n     \"parameters\": {\n      \"destinationBucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the bucket in which to store the new object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"destinationObject\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the new object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"destinationPredefinedAcl\": {\n       \"type\": \"string\",\n       \"description\": \"Apply a predefined set of access controls to the destination object.\",\n       \"enum\": [\n        \"authenticatedRead\",\n        \"bucketOwnerFullControl\",\n        \"bucketOwnerRead\",\n      
  \"private\",\n        \"projectPrivate\",\n        \"publicRead\"\n       ],\n       \"enumDescriptions\": [\n        \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n        \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n        \"Object owner gets OWNER access, and project team owners get READER access.\",\n        \"Object owner gets OWNER access.\",\n        \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n        \"Object owner gets OWNER access, and allUsers get READER access.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"ifGenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current generation matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current metageneration matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"destinationBucket\",\n      \"destinationObject\"\n     ],\n     \"request\": {\n      \"$ref\": \"ComposeRequest\"\n     },\n     \"response\": {\n      \"$ref\": \"Object\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ],\n     \"supportsMediaDownload\": true\n    },\n    \"copy\": {\n     \"id\": \"storage.objects.copy\",\n     \"path\": \"b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Copies a source object to a destination object. 
Optionally overrides metadata.\",\n     \"parameters\": {\n      \"destinationBucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"destinationObject\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"destinationPredefinedAcl\": {\n       \"type\": \"string\",\n       \"description\": \"Apply a predefined set of access controls to the destination object.\",\n       \"enum\": [\n        \"authenticatedRead\",\n        \"bucketOwnerFullControl\",\n        \"bucketOwnerRead\",\n        \"private\",\n        \"projectPrivate\",\n        \"publicRead\"\n       ],\n       \"enumDescriptions\": [\n        \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n        \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n        \"Object owner gets OWNER access, and project team owners get READER access.\",\n        \"Object owner gets OWNER access.\",\n        \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n        \"Object owner gets OWNER access, and allUsers get READER access.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"ifGenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the destination object's current generation matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifGenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on 
whether the destination object's current generation does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the destination object's current metageneration matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the destination object's current metageneration does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifSourceGenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the source object's generation matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifSourceGenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the source object's generation does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifSourceMetagenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the source object's current metageneration matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifSourceMetagenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the source object's current metageneration does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"projection\": {\n       \"type\": \"string\",\n       \"description\": \"Set of properties to return. 
Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.\",\n       \"enum\": [\n        \"full\",\n        \"noAcl\"\n       ],\n       \"enumDescriptions\": [\n        \"Include all properties.\",\n        \"Omit the acl property.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"sourceBucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the bucket in which to find the source object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"sourceGeneration\": {\n       \"type\": \"string\",\n       \"description\": \"If present, selects a specific revision of the source object (as opposed to the latest version, the default).\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"sourceObject\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the source object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"sourceBucket\",\n      \"sourceObject\",\n      \"destinationBucket\",\n      \"destinationObject\"\n     ],\n     \"request\": {\n      \"$ref\": \"Object\"\n     },\n     \"response\": {\n      \"$ref\": \"Object\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ],\n     \"supportsMediaDownload\": true\n    },\n    \"delete\": {\n     \"id\": \"storage.objects.delete\",\n     \"path\": \"b/{bucket}/o/{object}\",\n     \"httpMethod\": \"DELETE\",\n     \"description\": \"Deletes an object and its metadata. 
Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the bucket in which the object resides.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"generation\": {\n       \"type\": \"string\",\n       \"description\": \"If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifGenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current generation matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifGenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current generation does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current metageneration matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current metageneration does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"object\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"object\"\n     ],\n     \"scopes\": [\n      
\"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ]\n    },\n    \"get\": {\n     \"id\": \"storage.objects.get\",\n     \"path\": \"b/{bucket}/o/{object}\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Retrieves an object or its metadata.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the bucket in which the object resides.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"generation\": {\n       \"type\": \"string\",\n       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifGenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's generation matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifGenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's generation does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current metageneration matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current metageneration does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"object\": {\n       \"type\": 
\"string\",\n       \"description\": \"Name of the object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"projection\": {\n       \"type\": \"string\",\n       \"description\": \"Set of properties to return. Defaults to noAcl.\",\n       \"enum\": [\n        \"full\",\n        \"noAcl\"\n       ],\n       \"enumDescriptions\": [\n        \"Include all properties.\",\n        \"Omit the acl property.\"\n       ],\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"object\"\n     ],\n     \"response\": {\n      \"$ref\": \"Object\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_only\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ],\n     \"supportsMediaDownload\": true\n    },\n    \"insert\": {\n     \"id\": \"storage.objects.insert\",\n     \"path\": \"b/{bucket}/o\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Stores a new object and metadata.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"contentEncoding\": {\n       \"type\": \"string\",\n       \"description\": \"If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. 
This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.\",\n       \"location\": \"query\"\n      },\n      \"ifGenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current generation matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifGenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current generation does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current metageneration matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current metageneration does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"name\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any.\",\n       \"location\": \"query\"\n      },\n      \"predefinedAcl\": {\n       \"type\": \"string\",\n       \"description\": \"Apply a predefined set of access controls to this object.\",\n       \"enum\": [\n        \"authenticatedRead\",\n        \"bucketOwnerFullControl\",\n        \"bucketOwnerRead\",\n        \"private\",\n        \"projectPrivate\",\n        \"publicRead\"\n       ],\n       \"enumDescriptions\": [\n        \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n        \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n        \"Object owner gets OWNER access, and project team owners get READER access.\",\n        \"Object owner gets OWNER access.\",\n        \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n        \"Object owner gets OWNER access, and allUsers get READER access.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"projection\": {\n       \"type\": \"string\",\n       \"description\": \"Set of properties to return. 
Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.\",\n       \"enum\": [\n        \"full\",\n        \"noAcl\"\n       ],\n       \"enumDescriptions\": [\n        \"Include all properties.\",\n        \"Omit the acl property.\"\n       ],\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\"\n     ],\n     \"request\": {\n      \"$ref\": \"Object\"\n     },\n     \"response\": {\n      \"$ref\": \"Object\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ],\n     \"supportsMediaDownload\": true,\n     \"supportsMediaUpload\": true,\n     \"mediaUpload\": {\n      \"accept\": [\n       \"*/*\"\n      ],\n      \"protocols\": {\n       \"simple\": {\n        \"multipart\": true,\n        \"path\": \"/upload/storage/v1/b/{bucket}/o\"\n       },\n       \"resumable\": {\n        \"multipart\": true,\n        \"path\": \"/resumable/upload/storage/v1/b/{bucket}/o\"\n       }\n      }\n     }\n    },\n    \"list\": {\n     \"id\": \"storage.objects.list\",\n     \"path\": \"b/{bucket}/o\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Retrieves a list of objects matching the criteria.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the bucket in which to look for objects.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"delimiter\": {\n       \"type\": \"string\",\n       \"description\": \"Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. 
Duplicate prefixes are omitted.\",\n       \"location\": \"query\"\n      },\n      \"maxResults\": {\n       \"type\": \"integer\",\n       \"description\": \"Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.\",\n       \"format\": \"uint32\",\n       \"minimum\": \"0\",\n       \"location\": \"query\"\n      },\n      \"pageToken\": {\n       \"type\": \"string\",\n       \"description\": \"A previously-returned page token representing part of the larger set of results to view.\",\n       \"location\": \"query\"\n      },\n      \"prefix\": {\n       \"type\": \"string\",\n       \"description\": \"Filter results to objects whose names begin with this prefix.\",\n       \"location\": \"query\"\n      },\n      \"projection\": {\n       \"type\": \"string\",\n       \"description\": \"Set of properties to return. Defaults to noAcl.\",\n       \"enum\": [\n        \"full\",\n        \"noAcl\"\n       ],\n       \"enumDescriptions\": [\n        \"Include all properties.\",\n        \"Omit the acl property.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"versions\": {\n       \"type\": \"boolean\",\n       \"description\": \"If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.\",\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\"\n     ],\n     \"response\": {\n      \"$ref\": \"Objects\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_only\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ],\n     \"supportsSubscription\": true\n    },\n    \"patch\": {\n     \"id\": \"storage.objects.patch\",\n     \"path\": \"b/{bucket}/o/{object}\",\n     \"httpMethod\": \"PATCH\",\n     \"description\": \"Updates an object's metadata. This method supports patch semantics.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the bucket in which the object resides.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"generation\": {\n       \"type\": \"string\",\n       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifGenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current generation matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifGenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current generation does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current metageneration matches the given 
value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current metageneration does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"object\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"predefinedAcl\": {\n       \"type\": \"string\",\n       \"description\": \"Apply a predefined set of access controls to this object.\",\n       \"enum\": [\n        \"authenticatedRead\",\n        \"bucketOwnerFullControl\",\n        \"bucketOwnerRead\",\n        \"private\",\n        \"projectPrivate\",\n        \"publicRead\"\n       ],\n       \"enumDescriptions\": [\n        \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n        \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n        \"Object owner gets OWNER access, and project team owners get READER access.\",\n        \"Object owner gets OWNER access.\",\n        \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n        \"Object owner gets OWNER access, and allUsers get READER access.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"projection\": {\n       \"type\": \"string\",\n       \"description\": \"Set of properties to return. 
Defaults to full.\",\n       \"enum\": [\n        \"full\",\n        \"noAcl\"\n       ],\n       \"enumDescriptions\": [\n        \"Include all properties.\",\n        \"Omit the acl property.\"\n       ],\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"object\"\n     ],\n     \"request\": {\n      \"$ref\": \"Object\"\n     },\n     \"response\": {\n      \"$ref\": \"Object\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ]\n    },\n    \"rewrite\": {\n     \"id\": \"storage.objects.rewrite\",\n     \"path\": \"b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Rewrites a source object to a destination object. Optionally overrides metadata.\",\n     \"parameters\": {\n      \"destinationBucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"destinationObject\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the new object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"destinationPredefinedAcl\": {\n       \"type\": \"string\",\n       \"description\": \"Apply a predefined set of access controls to the destination object.\",\n       \"enum\": [\n        \"authenticatedRead\",\n        \"bucketOwnerFullControl\",\n        \"bucketOwnerRead\",\n        \"private\",\n        \"projectPrivate\",\n        \"publicRead\"\n       ],\n       \"enumDescriptions\": [\n        \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n        \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n        \"Object owner gets OWNER access, and project team owners get READER access.\",\n        \"Object owner gets OWNER access.\",\n        \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n        \"Object owner gets OWNER access, and allUsers get READER access.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"ifGenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the destination object's current generation matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifGenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the destination object's current generation does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the destination object's current metageneration matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationNotMatch\": {\n       \"type\": 
\"string\",\n       \"description\": \"Makes the operation conditional on whether the destination object's current metageneration does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifSourceGenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the source object's generation matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifSourceGenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the source object's generation does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifSourceMetagenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the source object's current metageneration matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifSourceMetagenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the source object's current metageneration does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"maxBytesRewrittenPerCall\": {\n       \"type\": \"string\",\n       \"description\": \"The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. 
Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"projection\": {\n       \"type\": \"string\",\n       \"description\": \"Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.\",\n       \"enum\": [\n        \"full\",\n        \"noAcl\"\n       ],\n       \"enumDescriptions\": [\n        \"Include all properties.\",\n        \"Omit the acl property.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"rewriteToken\": {\n       \"type\": \"string\",\n       \"description\": \"Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.\",\n       \"location\": \"query\"\n      },\n      \"sourceBucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the bucket in which to find the source object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"sourceGeneration\": {\n       \"type\": \"string\",\n       \"description\": \"If present, selects a specific revision of the source object (as opposed to the latest version, the default).\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"sourceObject\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the source object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      }\n     },\n     \"parameterOrder\": [\n      \"sourceBucket\",\n      \"sourceObject\",\n      \"destinationBucket\",\n      \"destinationObject\"\n     ],\n     \"request\": {\n      \"$ref\": \"Object\"\n     },\n     \"response\": {\n      \"$ref\": 
\"RewriteResponse\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ]\n    },\n    \"update\": {\n     \"id\": \"storage.objects.update\",\n     \"path\": \"b/{bucket}/o/{object}\",\n     \"httpMethod\": \"PUT\",\n     \"description\": \"Updates an object's metadata.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the bucket in which the object resides.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"generation\": {\n       \"type\": \"string\",\n       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifGenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current generation matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifGenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current generation does not match the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current metageneration matches the given value.\",\n       \"format\": \"int64\",\n       \"location\": \"query\"\n      },\n      \"ifMetagenerationNotMatch\": {\n       \"type\": \"string\",\n       \"description\": \"Makes the operation conditional on whether the object's current metageneration does not match the given value.\",\n       \"format\": \"int64\",\n       
\"location\": \"query\"\n      },\n      \"object\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the object.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"predefinedAcl\": {\n       \"type\": \"string\",\n       \"description\": \"Apply a predefined set of access controls to this object.\",\n       \"enum\": [\n        \"authenticatedRead\",\n        \"bucketOwnerFullControl\",\n        \"bucketOwnerRead\",\n        \"private\",\n        \"projectPrivate\",\n        \"publicRead\"\n       ],\n       \"enumDescriptions\": [\n        \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n        \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n        \"Object owner gets OWNER access, and project team owners get READER access.\",\n        \"Object owner gets OWNER access.\",\n        \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n        \"Object owner gets OWNER access, and allUsers get READER access.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"projection\": {\n       \"type\": \"string\",\n       \"description\": \"Set of properties to return. 
Defaults to full.\",\n       \"enum\": [\n        \"full\",\n        \"noAcl\"\n       ],\n       \"enumDescriptions\": [\n        \"Include all properties.\",\n        \"Omit the acl property.\"\n       ],\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\",\n      \"object\"\n     ],\n     \"request\": {\n      \"$ref\": \"Object\"\n     },\n     \"response\": {\n      \"$ref\": \"Object\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ],\n     \"supportsMediaDownload\": true\n    },\n    \"watchAll\": {\n     \"id\": \"storage.objects.watchAll\",\n     \"path\": \"b/{bucket}/o/watch\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Watch for changes on all objects in a bucket.\",\n     \"parameters\": {\n      \"bucket\": {\n       \"type\": \"string\",\n       \"description\": \"Name of the bucket in which to look for objects.\",\n       \"required\": true,\n       \"location\": \"path\"\n      },\n      \"delimiter\": {\n       \"type\": \"string\",\n       \"description\": \"Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.\",\n       \"location\": \"query\"\n      },\n      \"maxResults\": {\n       \"type\": \"integer\",\n       \"description\": \"Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. 
The default value of this parameter is 1,000 items.\",\n       \"format\": \"uint32\",\n       \"minimum\": \"0\",\n       \"location\": \"query\"\n      },\n      \"pageToken\": {\n       \"type\": \"string\",\n       \"description\": \"A previously-returned page token representing part of the larger set of results to view.\",\n       \"location\": \"query\"\n      },\n      \"prefix\": {\n       \"type\": \"string\",\n       \"description\": \"Filter results to objects whose names begin with this prefix.\",\n       \"location\": \"query\"\n      },\n      \"projection\": {\n       \"type\": \"string\",\n       \"description\": \"Set of properties to return. Defaults to noAcl.\",\n       \"enum\": [\n        \"full\",\n        \"noAcl\"\n       ],\n       \"enumDescriptions\": [\n        \"Include all properties.\",\n        \"Omit the acl property.\"\n       ],\n       \"location\": \"query\"\n      },\n      \"versions\": {\n       \"type\": \"boolean\",\n       \"description\": \"If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.\",\n       \"location\": \"query\"\n      }\n     },\n     \"parameterOrder\": [\n      \"bucket\"\n     ],\n     \"request\": {\n      \"$ref\": \"Channel\",\n      \"parameterName\": \"resource\"\n     },\n     \"response\": {\n      \"$ref\": \"Channel\"\n     },\n     \"scopes\": [\n      \"https://www.googleapis.com/auth/cloud-platform\",\n      \"https://www.googleapis.com/auth/devstorage.full_control\",\n      \"https://www.googleapis.com/auth/devstorage.read_only\",\n      \"https://www.googleapis.com/auth/devstorage.read_write\"\n     ],\n     \"supportsSubscription\": true\n    }\n   }\n  }\n }\n}\n"
  },
  {
    "path": "vendor/google.golang.org/api/storage/v1/storage-gen.go",
    "content": "// Package storage provides access to the Cloud Storage API.\n//\n// See https://developers.google.com/storage/docs/json_api/\n//\n// Usage example:\n//\n//   import \"google.golang.org/api/storage/v1\"\n//   ...\n//   storageService, err := storage.New(oauthHttpClient)\npackage storage\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/api/googleapi\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// Always reference these packages, just in case the auto-generated code\n// below doesn't.\nvar _ = bytes.NewBuffer\nvar _ = strconv.Itoa\nvar _ = fmt.Sprintf\nvar _ = json.NewDecoder\nvar _ = io.Copy\nvar _ = url.Parse\nvar _ = googleapi.Version\nvar _ = errors.New\nvar _ = strings.Replace\nvar _ = context.Background\n\nconst apiId = \"storage:v1\"\nconst apiName = \"storage\"\nconst apiVersion = \"v1\"\nconst basePath = \"https://www.googleapis.com/storage/v1/\"\n\n// OAuth2 scopes used by this API.\nconst (\n\t// View and manage your data across Google Cloud Platform services\n\tCloudPlatformScope = \"https://www.googleapis.com/auth/cloud-platform\"\n\n\t// Manage your data and permissions in Google Cloud Storage\n\tDevstorageFullControlScope = \"https://www.googleapis.com/auth/devstorage.full_control\"\n\n\t// View your data in Google Cloud Storage\n\tDevstorageReadOnlyScope = \"https://www.googleapis.com/auth/devstorage.read_only\"\n\n\t// Manage your data in Google Cloud Storage\n\tDevstorageReadWriteScope = \"https://www.googleapis.com/auth/devstorage.read_write\"\n)\n\nfunc New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.BucketAccessControls = NewBucketAccessControlsService(s)\n\ts.Buckets = NewBucketsService(s)\n\ts.Channels = NewChannelsService(s)\n\ts.DefaultObjectAccessControls = 
NewDefaultObjectAccessControlsService(s)\n\ts.ObjectAccessControls = NewObjectAccessControlsService(s)\n\ts.Objects = NewObjectsService(s)\n\treturn s, nil\n}\n\ntype Service struct {\n\tclient    *http.Client\n\tBasePath  string // API endpoint base URL\n\tUserAgent string // optional additional User-Agent fragment\n\n\tBucketAccessControls *BucketAccessControlsService\n\n\tBuckets *BucketsService\n\n\tChannels *ChannelsService\n\n\tDefaultObjectAccessControls *DefaultObjectAccessControlsService\n\n\tObjectAccessControls *ObjectAccessControlsService\n\n\tObjects *ObjectsService\n}\n\nfunc (s *Service) userAgent() string {\n\tif s.UserAgent == \"\" {\n\t\treturn googleapi.UserAgent\n\t}\n\treturn googleapi.UserAgent + \" \" + s.UserAgent\n}\n\nfunc NewBucketAccessControlsService(s *Service) *BucketAccessControlsService {\n\trs := &BucketAccessControlsService{s: s}\n\treturn rs\n}\n\ntype BucketAccessControlsService struct {\n\ts *Service\n}\n\nfunc NewBucketsService(s *Service) *BucketsService {\n\trs := &BucketsService{s: s}\n\treturn rs\n}\n\ntype BucketsService struct {\n\ts *Service\n}\n\nfunc NewChannelsService(s *Service) *ChannelsService {\n\trs := &ChannelsService{s: s}\n\treturn rs\n}\n\ntype ChannelsService struct {\n\ts *Service\n}\n\nfunc NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService {\n\trs := &DefaultObjectAccessControlsService{s: s}\n\treturn rs\n}\n\ntype DefaultObjectAccessControlsService struct {\n\ts *Service\n}\n\nfunc NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService {\n\trs := &ObjectAccessControlsService{s: s}\n\treturn rs\n}\n\ntype ObjectAccessControlsService struct {\n\ts *Service\n}\n\nfunc NewObjectsService(s *Service) *ObjectsService {\n\trs := &ObjectsService{s: s}\n\treturn rs\n}\n\ntype ObjectsService struct {\n\ts *Service\n}\n\ntype Bucket struct {\n\t// Acl: Access controls on the bucket.\n\tAcl []*BucketAccessControl `json:\"acl,omitempty\"`\n\n\t// Cors: The bucket's 
Cross-Origin Resource Sharing (CORS)\n\t// configuration.\n\tCors []*BucketCors `json:\"cors,omitempty\"`\n\n\t// DefaultObjectAcl: Default access controls to apply to new objects\n\t// when no ACL is provided.\n\tDefaultObjectAcl []*ObjectAccessControl `json:\"defaultObjectAcl,omitempty\"`\n\n\t// Etag: HTTP 1.1 Entity tag for the bucket.\n\tEtag string `json:\"etag,omitempty\"`\n\n\t// Id: The ID of the bucket.\n\tId string `json:\"id,omitempty\"`\n\n\t// Kind: The kind of item this is. For buckets, this is always\n\t// storage#bucket.\n\tKind string `json:\"kind,omitempty\"`\n\n\t// Lifecycle: The bucket's lifecycle configuration. See lifecycle\n\t// management for more information.\n\tLifecycle *BucketLifecycle `json:\"lifecycle,omitempty\"`\n\n\t// Location: The location of the bucket. Object data for objects in the\n\t// bucket resides in physical storage within this region. Defaults to\n\t// US. See the developer's guide for the authoritative list.\n\tLocation string `json:\"location,omitempty\"`\n\n\t// Logging: The bucket's logging configuration, which defines the\n\t// destination bucket and optional name prefix for the current bucket's\n\t// logs.\n\tLogging *BucketLogging `json:\"logging,omitempty\"`\n\n\t// Metageneration: The metadata generation of this bucket.\n\tMetageneration int64 `json:\"metageneration,omitempty,string\"`\n\n\t// Name: The name of the bucket.\n\tName string `json:\"name,omitempty\"`\n\n\t// Owner: The owner of the bucket. This is always the project team's\n\t// owner group.\n\tOwner *BucketOwner `json:\"owner,omitempty\"`\n\n\t// ProjectNumber: The project number of the project the bucket belongs\n\t// to.\n\tProjectNumber uint64 `json:\"projectNumber,omitempty,string\"`\n\n\t// SelfLink: The URI of this bucket.\n\tSelfLink string `json:\"selfLink,omitempty\"`\n\n\t// StorageClass: The bucket's storage class. 
This defines how objects in\n\t// the bucket are stored and determines the SLA and the cost of storage.\n\t// Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY.\n\t// Defaults to STANDARD. For more information, see storage classes.\n\tStorageClass string `json:\"storageClass,omitempty\"`\n\n\t// TimeCreated: Creation time of the bucket in RFC 3339 format.\n\tTimeCreated string `json:\"timeCreated,omitempty\"`\n\n\t// Versioning: The bucket's versioning configuration.\n\tVersioning *BucketVersioning `json:\"versioning,omitempty\"`\n\n\t// Website: The bucket's website configuration.\n\tWebsite *BucketWebsite `json:\"website,omitempty\"`\n}\n\ntype BucketCors struct {\n\t// MaxAgeSeconds: The value, in seconds, to return in the\n\t// Access-Control-Max-Age header used in preflight responses.\n\tMaxAgeSeconds int64 `json:\"maxAgeSeconds,omitempty\"`\n\n\t// Method: The list of HTTP methods on which to include CORS response\n\t// headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list\n\t// of methods, and means \"any method\".\n\tMethod []string `json:\"method,omitempty\"`\n\n\t// Origin: The list of Origins eligible to receive CORS response\n\t// headers. 
Note: \"*\" is permitted in the list of origins, and means\n\t// \"any Origin\".\n\tOrigin []string `json:\"origin,omitempty\"`\n\n\t// ResponseHeader: The list of HTTP headers other than the simple\n\t// response headers to give permission for the user-agent to share\n\t// across domains.\n\tResponseHeader []string `json:\"responseHeader,omitempty\"`\n}\n\ntype BucketLifecycle struct {\n\t// Rule: A lifecycle management rule, which is made of an action to take\n\t// and the condition(s) under which the action will be taken.\n\tRule []*BucketLifecycleRule `json:\"rule,omitempty\"`\n}\n\ntype BucketLifecycleRule struct {\n\t// Action: The action to take.\n\tAction *BucketLifecycleRuleAction `json:\"action,omitempty\"`\n\n\t// Condition: The condition(s) under which the action will be taken.\n\tCondition *BucketLifecycleRuleCondition `json:\"condition,omitempty\"`\n}\n\ntype BucketLifecycleRuleAction struct {\n\t// Type: Type of the action. Currently, only Delete is supported.\n\tType string `json:\"type,omitempty\"`\n}\n\ntype BucketLifecycleRuleCondition struct {\n\t// Age: Age of an object (in days). This condition is satisfied when an\n\t// object reaches the specified age.\n\tAge int64 `json:\"age,omitempty\"`\n\n\t// CreatedBefore: A date in RFC 3339 format with only the date part (for\n\t// instance, \"2013-01-15\"). This condition is satisfied when an object\n\t// is created before midnight of the specified date in UTC.\n\tCreatedBefore string `json:\"createdBefore,omitempty\"`\n\n\t// IsLive: Relevant only for versioned objects. If the value is true,\n\t// this condition matches live objects; if the value is false, it\n\t// matches archived objects.\n\tIsLive bool `json:\"isLive,omitempty\"`\n\n\t// NumNewerVersions: Relevant only for versioned objects. 
If the value\n\t// is N, this condition is satisfied when there are at least N versions\n\t// (including the live version) newer than this version of the object.\n\tNumNewerVersions int64 `json:\"numNewerVersions,omitempty\"`\n}\n\ntype BucketLogging struct {\n\t// LogBucket: The destination bucket where the current bucket's logs\n\t// should be placed.\n\tLogBucket string `json:\"logBucket,omitempty\"`\n\n\t// LogObjectPrefix: A prefix for log object names.\n\tLogObjectPrefix string `json:\"logObjectPrefix,omitempty\"`\n}\n\ntype BucketOwner struct {\n\t// Entity: The entity, in the form project-owner-projectId.\n\tEntity string `json:\"entity,omitempty\"`\n\n\t// EntityId: The ID for the entity.\n\tEntityId string `json:\"entityId,omitempty\"`\n}\n\ntype BucketVersioning struct {\n\t// Enabled: While set to true, versioning is fully enabled for this\n\t// bucket.\n\tEnabled bool `json:\"enabled,omitempty\"`\n}\n\ntype BucketWebsite struct {\n\t// MainPageSuffix: Behaves as the bucket's directory index where missing\n\t// objects are treated as potential directories.\n\tMainPageSuffix string `json:\"mainPageSuffix,omitempty\"`\n\n\t// NotFoundPage: The custom object to return when a requested resource\n\t// is not found.\n\tNotFoundPage string `json:\"notFoundPage,omitempty\"`\n}\n\ntype BucketAccessControl struct {\n\t// Bucket: The name of the bucket.\n\tBucket string `json:\"bucket,omitempty\"`\n\n\t// Domain: The domain associated with the entity, if any.\n\tDomain string `json:\"domain,omitempty\"`\n\n\t// Email: The email address associated with the entity, if any.\n\tEmail string `json:\"email,omitempty\"`\n\n\t// Entity: The entity holding the permission, in one of the following\n\t// forms:\n\t// - user-userId\n\t// - user-email\n\t// - group-groupId\n\t// - group-email\n\t// - domain-domain\n\t// - project-team-projectId\n\t// - allUsers\n\t// - allAuthenticatedUsers Examples:\n\t// - The user liz@example.com would be user-liz@example.com.\n\t// - The 
group example@googlegroups.com would be\n\t// group-example@googlegroups.com.\n\t// - To refer to all members of the Google Apps for Business domain\n\t// example.com, the entity would be domain-example.com.\n\tEntity string `json:\"entity,omitempty\"`\n\n\t// EntityId: The ID for the entity, if any.\n\tEntityId string `json:\"entityId,omitempty\"`\n\n\t// Etag: HTTP 1.1 Entity tag for the access-control entry.\n\tEtag string `json:\"etag,omitempty\"`\n\n\t// Id: The ID of the access-control entry.\n\tId string `json:\"id,omitempty\"`\n\n\t// Kind: The kind of item this is. For bucket access control entries,\n\t// this is always storage#bucketAccessControl.\n\tKind string `json:\"kind,omitempty\"`\n\n\t// ProjectTeam: The project team associated with the entity, if any.\n\tProjectTeam *BucketAccessControlProjectTeam `json:\"projectTeam,omitempty\"`\n\n\t// Role: The access permission for the entity. Can be READER, WRITER, or\n\t// OWNER.\n\tRole string `json:\"role,omitempty\"`\n\n\t// SelfLink: The link to this access-control entry.\n\tSelfLink string `json:\"selfLink,omitempty\"`\n}\n\ntype BucketAccessControlProjectTeam struct {\n\t// ProjectNumber: The project number.\n\tProjectNumber string `json:\"projectNumber,omitempty\"`\n\n\t// Team: The team. Can be owners, editors, or viewers.\n\tTeam string `json:\"team,omitempty\"`\n}\n\ntype BucketAccessControls struct {\n\t// Items: The list of items.\n\tItems []*BucketAccessControl `json:\"items,omitempty\"`\n\n\t// Kind: The kind of item this is. For lists of bucket access control\n\t// entries, this is always storage#bucketAccessControls.\n\tKind string `json:\"kind,omitempty\"`\n}\n\ntype Buckets struct {\n\t// Items: The list of items.\n\tItems []*Bucket `json:\"items,omitempty\"`\n\n\t// Kind: The kind of item this is. For lists of buckets, this is always\n\t// storage#buckets.\n\tKind string `json:\"kind,omitempty\"`\n\n\t// NextPageToken: The continuation token, used to page through large\n\t// result sets. 
Provide this value in a subsequent request to return the\n\t// next page of results.\n\tNextPageToken string `json:\"nextPageToken,omitempty\"`\n}\n\ntype Channel struct {\n\t// Address: The address where notifications are delivered for this\n\t// channel.\n\tAddress string `json:\"address,omitempty\"`\n\n\t// Expiration: Date and time of notification channel expiration,\n\t// expressed as a Unix timestamp, in milliseconds. Optional.\n\tExpiration int64 `json:\"expiration,omitempty,string\"`\n\n\t// Id: A UUID or similar unique string that identifies this channel.\n\tId string `json:\"id,omitempty\"`\n\n\t// Kind: Identifies this as a notification channel used to watch for\n\t// changes to a resource. Value: the fixed string \"api#channel\".\n\tKind string `json:\"kind,omitempty\"`\n\n\t// Params: Additional parameters controlling delivery channel behavior.\n\t// Optional.\n\tParams map[string]string `json:\"params,omitempty\"`\n\n\t// Payload: A Boolean value to indicate whether payload is wanted.\n\t// Optional.\n\tPayload bool `json:\"payload,omitempty\"`\n\n\t// ResourceId: An opaque ID that identifies the resource being watched\n\t// on this channel. Stable across different API versions.\n\tResourceId string `json:\"resourceId,omitempty\"`\n\n\t// ResourceUri: A version-specific identifier for the watched resource.\n\tResourceUri string `json:\"resourceUri,omitempty\"`\n\n\t// Token: An arbitrary string delivered to the target address with each\n\t// notification delivered over this channel. 
Optional.\n\tToken string `json:\"token,omitempty\"`\n\n\t// Type: The type of delivery mechanism used for this channel.\n\tType string `json:\"type,omitempty\"`\n}\n\ntype ComposeRequest struct {\n\t// Destination: Properties of the resulting object.\n\tDestination *Object `json:\"destination,omitempty\"`\n\n\t// Kind: The kind of item this is.\n\tKind string `json:\"kind,omitempty\"`\n\n\t// SourceObjects: The list of source objects that will be concatenated\n\t// into a single object.\n\tSourceObjects []*ComposeRequestSourceObjects `json:\"sourceObjects,omitempty\"`\n}\n\ntype ComposeRequestSourceObjects struct {\n\t// Generation: The generation of this object to use as the source.\n\tGeneration int64 `json:\"generation,omitempty,string\"`\n\n\t// Name: The source object's name. The source object's bucket is\n\t// implicitly the destination bucket.\n\tName string `json:\"name,omitempty\"`\n\n\t// ObjectPreconditions: Conditions that must be met for this operation\n\t// to execute.\n\tObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:\"objectPreconditions,omitempty\"`\n}\n\ntype ComposeRequestSourceObjectsObjectPreconditions struct {\n\t// IfGenerationMatch: Only perform the composition if the generation of\n\t// the source object that would be used matches this value. If this\n\t// value and a generation are both specified, they must be the same\n\t// value or the call will fail.\n\tIfGenerationMatch int64 `json:\"ifGenerationMatch,omitempty,string\"`\n}\n\ntype Object struct {\n\t// Acl: Access controls on the object.\n\tAcl []*ObjectAccessControl `json:\"acl,omitempty\"`\n\n\t// Bucket: The name of the bucket containing this object.\n\tBucket string `json:\"bucket,omitempty\"`\n\n\t// CacheControl: Cache-Control directive for the object data.\n\tCacheControl string `json:\"cacheControl,omitempty\"`\n\n\t// ComponentCount: Number of underlying components that make up this\n\t// object. 
Components are accumulated by compose operations.\n\tComponentCount int64 `json:\"componentCount,omitempty\"`\n\n\t// ContentDisposition: Content-Disposition of the object data.\n\tContentDisposition string `json:\"contentDisposition,omitempty\"`\n\n\t// ContentEncoding: Content-Encoding of the object data.\n\tContentEncoding string `json:\"contentEncoding,omitempty\"`\n\n\t// ContentLanguage: Content-Language of the object data.\n\tContentLanguage string `json:\"contentLanguage,omitempty\"`\n\n\t// ContentType: Content-Type of the object data.\n\tContentType string `json:\"contentType,omitempty\"`\n\n\t// Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B;\n\t// encoded using base64 in big-endian byte order.\n\tCrc32c string `json:\"crc32c,omitempty\"`\n\n\t// Etag: HTTP 1.1 Entity tag for the object.\n\tEtag string `json:\"etag,omitempty\"`\n\n\t// Generation: The content generation of this object. Used for object\n\t// versioning.\n\tGeneration int64 `json:\"generation,omitempty,string\"`\n\n\t// Id: The ID of the object.\n\tId string `json:\"id,omitempty\"`\n\n\t// Kind: The kind of item this is. For objects, this is always\n\t// storage#object.\n\tKind string `json:\"kind,omitempty\"`\n\n\t// Md5Hash: MD5 hash of the data; encoded using base64.\n\tMd5Hash string `json:\"md5Hash,omitempty\"`\n\n\t// MediaLink: Media download link.\n\tMediaLink string `json:\"mediaLink,omitempty\"`\n\n\t// Metadata: User-provided metadata, in key/value pairs.\n\tMetadata map[string]string `json:\"metadata,omitempty\"`\n\n\t// Metageneration: The version of the metadata for this object at this\n\t// generation. Used for preconditions and for detecting changes in\n\t// metadata. A metageneration number is only meaningful in the context\n\t// of a particular generation of a particular object.\n\tMetageneration int64 `json:\"metageneration,omitempty,string\"`\n\n\t// Name: The name of this object. 
Required if not specified by URL\n\t// parameter.\n\tName string `json:\"name,omitempty\"`\n\n\t// Owner: The owner of the object. This will always be the uploader of\n\t// the object.\n\tOwner *ObjectOwner `json:\"owner,omitempty\"`\n\n\t// SelfLink: The link to this object.\n\tSelfLink string `json:\"selfLink,omitempty\"`\n\n\t// Size: Content-Length of the data in bytes.\n\tSize uint64 `json:\"size,omitempty,string\"`\n\n\t// StorageClass: Storage class of the object.\n\tStorageClass string `json:\"storageClass,omitempty\"`\n\n\t// TimeDeleted: The deletion time of the object in RFC 3339 format. Will\n\t// be returned if and only if this version of the object has been\n\t// deleted.\n\tTimeDeleted string `json:\"timeDeleted,omitempty\"`\n\n\t// Updated: The creation or modification time of the object in RFC 3339\n\t// format. For buckets with versioning enabled, changing an object's\n\t// metadata does not change this property.\n\tUpdated string `json:\"updated,omitempty\"`\n}\n\ntype ObjectOwner struct {\n\t// Entity: The entity, in the form user-userId.\n\tEntity string `json:\"entity,omitempty\"`\n\n\t// EntityId: The ID for the entity.\n\tEntityId string `json:\"entityId,omitempty\"`\n}\n\ntype ObjectAccessControl struct {\n\t// Bucket: The name of the bucket.\n\tBucket string `json:\"bucket,omitempty\"`\n\n\t// Domain: The domain associated with the entity, if any.\n\tDomain string `json:\"domain,omitempty\"`\n\n\t// Email: The email address associated with the entity, if any.\n\tEmail string `json:\"email,omitempty\"`\n\n\t// Entity: The entity holding the permission, in one of the following\n\t// forms:\n\t// - user-userId\n\t// - user-email\n\t// - group-groupId\n\t// - group-email\n\t// - domain-domain\n\t// - project-team-projectId\n\t// - allUsers\n\t// - allAuthenticatedUsers Examples:\n\t// - The user liz@example.com would be user-liz@example.com.\n\t// - The group example@googlegroups.com would be\n\t// group-example@googlegroups.com.\n\t// - To 
refer to all members of the Google Apps for Business domain\n\t// example.com, the entity would be domain-example.com.\n\tEntity string `json:\"entity,omitempty\"`\n\n\t// EntityId: The ID for the entity, if any.\n\tEntityId string `json:\"entityId,omitempty\"`\n\n\t// Etag: HTTP 1.1 Entity tag for the access-control entry.\n\tEtag string `json:\"etag,omitempty\"`\n\n\t// Generation: The content generation of the object.\n\tGeneration int64 `json:\"generation,omitempty,string\"`\n\n\t// Id: The ID of the access-control entry.\n\tId string `json:\"id,omitempty\"`\n\n\t// Kind: The kind of item this is. For object access control entries,\n\t// this is always storage#objectAccessControl.\n\tKind string `json:\"kind,omitempty\"`\n\n\t// Object: The name of the object.\n\tObject string `json:\"object,omitempty\"`\n\n\t// ProjectTeam: The project team associated with the entity, if any.\n\tProjectTeam *ObjectAccessControlProjectTeam `json:\"projectTeam,omitempty\"`\n\n\t// Role: The access permission for the entity. Can be READER or OWNER.\n\tRole string `json:\"role,omitempty\"`\n\n\t// SelfLink: The link to this access-control entry.\n\tSelfLink string `json:\"selfLink,omitempty\"`\n}\n\ntype ObjectAccessControlProjectTeam struct {\n\t// ProjectNumber: The project number.\n\tProjectNumber string `json:\"projectNumber,omitempty\"`\n\n\t// Team: The team. Can be owners, editors, or viewers.\n\tTeam string `json:\"team,omitempty\"`\n}\n\ntype ObjectAccessControls struct {\n\t// Items: The list of items.\n\tItems []interface{} `json:\"items,omitempty\"`\n\n\t// Kind: The kind of item this is. For lists of object access control\n\t// entries, this is always storage#objectAccessControls.\n\tKind string `json:\"kind,omitempty\"`\n}\n\ntype Objects struct {\n\t// Items: The list of items.\n\tItems []*Object `json:\"items,omitempty\"`\n\n\t// Kind: The kind of item this is. 
For lists of objects, this is always\n\t// storage#objects.\n\tKind string `json:\"kind,omitempty\"`\n\n\t// NextPageToken: The continuation token, used to page through large\n\t// result sets. Provide this value in a subsequent request to return the\n\t// next page of results.\n\tNextPageToken string `json:\"nextPageToken,omitempty\"`\n\n\t// Prefixes: The list of prefixes of objects matching-but-not-listed up\n\t// to and including the requested delimiter.\n\tPrefixes []string `json:\"prefixes,omitempty\"`\n}\n\ntype RewriteResponse struct {\n\t// Done: true if the copy is finished; otherwise, false if the copy is\n\t// in progress. This property is always present in the response.\n\tDone bool `json:\"done,omitempty\"`\n\n\t// Kind: The kind of item this is.\n\tKind string `json:\"kind,omitempty\"`\n\n\t// ObjectSize: The total size of the object being copied in bytes. This\n\t// property is always present in the response.\n\tObjectSize uint64 `json:\"objectSize,omitempty,string\"`\n\n\t// Resource: A resource containing the metadata for the copied-to\n\t// object. This property is present in the response only when copying\n\t// completes.\n\tResource *Object `json:\"resource,omitempty\"`\n\n\t// RewriteToken: A token to use in subsequent requests to continue\n\t// copying data. This token is present in the response only when there\n\t// is more data to copy.\n\tRewriteToken string `json:\"rewriteToken,omitempty\"`\n\n\t// TotalBytesRewritten: The total bytes written so far, which can be\n\t// used to provide a waiting user with a progress indicator. 
This
	// property is always present in the response.
	TotalBytesRewritten uint64 `json:"totalBytesRewritten,omitempty,string"`
}

// method id "storage.bucketAccessControls.delete":

type BucketAccessControlsDeleteCall struct {
	s      *Service
	bucket string
	entity string
	opt_   map[string]interface{}
}

// Delete: Permanently deletes the ACL entry for the specified entity on
// the specified bucket.
func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall {
	c := &BucketAccessControlsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
	c.bucket = bucket
	c.entity = entity
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

// Do executes the "storage.bucketAccessControls.delete" call.
func (c *BucketAccessControlsDeleteCall) Do() error {
	var body io.Reader = nil
	params := make(url.Values)
	params.Set("alt", "json")
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
	urls += "?" + params.Encode()
	// http.NewRequest fails on a malformed URL (e.g. a bad BasePath).
	// Previously the error was discarded with `req, _ :=`, so Expand
	// would dereference a nil request. Propagate the error instead.
	req, err := http.NewRequest("DELETE", urls, body)
	if err != nil {
		return err
	}
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
		"entity": c.entity,
	})
	req.Header.Set("User-Agent", c.s.userAgent())
	res, err := c.s.client.Do(req)
	if err != nil {
		return err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return err
	}
	return nil
	// {
	//   "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.",
	//   "httpMethod": "DELETE",
	//   "id": "storage.bucketAccessControls.delete",
	//   "parameterOrder": [
	//     "bucket",
	//     "entity"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "entity": {
	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/acl/{entity}",
	//   "scopes": [
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}

// method id "storage.bucketAccessControls.get":

type BucketAccessControlsGetCall struct {
	s      *Service
	bucket string
	entity string
	opt_   map[string]interface{}
}

// Get: Returns the ACL entry for the specified entity on the specified
// bucket.
func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall {
	c := &BucketAccessControlsGetCall{s: r.s, opt_: make(map[string]interface{})}
	c.bucket = bucket
	c.entity = entity
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

func (c *BucketAccessControlsGetCall) Do() (*BucketAccessControl, error) {
	var body io.Reader = nil
	params := make(url.Values)
	params.Set("alt", "json")
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, 
\"b/{bucket}/acl/{entity}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t\t\"entity\": c.entity,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *BucketAccessControl\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Returns the ACL entry for the specified entity on the specified bucket.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"storage.bucketAccessControls.get\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\",\n\t//     \"entity\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"entity\": {\n\t//       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/acl/{entity}\",\n\t//   \"response\": {\n\t//     \"$ref\": \"BucketAccessControl\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.bucketAccessControls.insert\":\n\ntype BucketAccessControlsInsertCall struct {\n\ts                   *Service\n\tbucket              string\n\tbucketaccesscontrol *BucketAccessControl\n\topt_                map[string]interface{}\n}\n\n// Insert: Creates a new ACL entry on the specified bucket.\nfunc (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall {\n\tc := &BucketAccessControlsInsertCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.bucketaccesscontrol = bucketaccesscontrol\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *BucketAccessControlsInsertCall) Do() (*BucketAccessControl, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/acl\")\n\turls += \"?\" + params.Encode()\n\treq, _ := 
http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *BucketAccessControl\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Creates a new ACL entry on the specified bucket.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"storage.bucketAccessControls.insert\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/acl\",\n\t//   \"request\": {\n\t//     \"$ref\": \"BucketAccessControl\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"BucketAccessControl\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.bucketAccessControls.list\":\n\ntype BucketAccessControlsListCall struct {\n\ts      *Service\n\tbucket string\n\topt_   map[string]interface{}\n}\n\n// List: Retrieves ACL entries on the specified bucket.\nfunc (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall {\n\tc := &BucketAccessControlsListCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) 
*BucketAccessControlsListCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *BucketAccessControlsListCall) Do() (*BucketAccessControls, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/acl\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *BucketAccessControls\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Retrieves ACL entries on the specified bucket.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"storage.bucketAccessControls.list\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/acl\",\n\t//   \"response\": {\n\t//     \"$ref\": \"BucketAccessControls\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.bucketAccessControls.patch\":\n\ntype BucketAccessControlsPatchCall struct {\n\ts                   *Service\n\tbucket              string\n\tentity              string\n\tbucketaccesscontrol *BucketAccessControl\n\topt_                map[string]interface{}\n}\n\n// Patch: Updates an 
ACL entry on the specified bucket. This method\n// supports patch semantics.\nfunc (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall {\n\tc := &BucketAccessControlsPatchCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.entity = entity\n\tc.bucketaccesscontrol = bucketaccesscontrol\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *BucketAccessControlsPatchCall) Do() (*BucketAccessControl, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/acl/{entity}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"PATCH\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t\t\"entity\": c.entity,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *BucketAccessControl\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Updates an ACL entry on the specified bucket. 
This method supports patch semantics.\",\n\t//   \"httpMethod\": \"PATCH\",\n\t//   \"id\": \"storage.bucketAccessControls.patch\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\",\n\t//     \"entity\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"entity\": {\n\t//       \"description\": \"The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/acl/{entity}\",\n\t//   \"request\": {\n\t//     \"$ref\": \"BucketAccessControl\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"BucketAccessControl\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.bucketAccessControls.update\":\n\ntype BucketAccessControlsUpdateCall struct {\n\ts                   *Service\n\tbucket              string\n\tentity              string\n\tbucketaccesscontrol *BucketAccessControl\n\topt_                map[string]interface{}\n}\n\n// Update: Updates an ACL entry on the specified bucket.\nfunc (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall {\n\tc := &BucketAccessControlsUpdateCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.entity = entity\n\tc.bucketaccesscontrol = bucketaccesscontrol\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) 
*BucketAccessControlsUpdateCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *BucketAccessControlsUpdateCall) Do() (*BucketAccessControl, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/acl/{entity}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"PUT\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t\t\"entity\": c.entity,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *BucketAccessControl\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Updates an ACL entry on the specified bucket.\",\n\t//   \"httpMethod\": \"PUT\",\n\t//   \"id\": \"storage.bucketAccessControls.update\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\",\n\t//     \"entity\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"entity\": {\n\t//       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/acl/{entity}",
	//   "request": {
	//     "$ref": "BucketAccessControl"
	//   },
	//   "response": {
	//     "$ref": "BucketAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}

// method id "storage.buckets.delete":

type BucketsDeleteCall struct {
	s      *Service
	bucket string
	opt_   map[string]interface{}
}

// Delete: Permanently deletes an empty bucket.
func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall {
	c := &BucketsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
	c.bucket = bucket
	return c
}

// IfMetagenerationMatch sets the optional parameter
// "ifMetagenerationMatch": If set, only deletes the bucket if its
// metageneration matches this value.
func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall {
	c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch
	return c
}

// IfMetagenerationNotMatch sets the optional parameter
// "ifMetagenerationNotMatch": If set, only deletes the bucket if its
// metageneration does not match this value.
func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall {
	c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

// Do executes the "storage.buckets.delete" call.
func (c *BucketsDeleteCall) Do() error {
	var body io.Reader = nil
	params := make(url.Values)
	params.Set("alt", "json")
	if v, ok := c.opt_["ifMetagenerationMatch"]; ok {
		params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok {
		params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
	urls += "?" + params.Encode()
	// http.NewRequest fails on a malformed URL (e.g. a bad BasePath).
	// Previously the error was discarded with `req, _ :=`, so Expand
	// would dereference a nil request. Propagate the error instead.
	req, err := http.NewRequest("DELETE", urls, body)
	if err != nil {
		return err
	}
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
	})
	req.Header.Set("User-Agent", c.s.userAgent())
	res, err := c.s.client.Do(req)
	if err != nil {
		return err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return err
	}
	return nil
	// {
	//   "description": "Permanently deletes an empty bucket.",
	//   "httpMethod": "DELETE",
	//   "id": "storage.buckets.delete",
	//   "parameterOrder": [
	//     "bucket"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "ifMetagenerationMatch": {
	//       "description": "If set, only deletes the bucket if its metageneration matches this value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationNotMatch": {
	//       "description": "If set, only deletes the bucket if its metageneration does not match this value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}",
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}

// method id "storage.buckets.get":

type BucketsGetCall struct {
	s      *Service
	bucket string
	opt_   map[string]interface{}
}

// Get: Returns metadata for the specified bucket.
func (r *BucketsService) Get(bucket string) *BucketsGetCall {
	c := &BucketsGetCall{s: r.s, opt_: make(map[string]interface{})}
	c.bucket = bucket
	return c
}

// IfMetagenerationMatch sets the optional parameter
// "ifMetagenerationMatch": Makes the return of the bucket metadata
// conditional on whether the bucket's current metageneration matches
// the given value.
func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall {
	c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch
	return c
}

// IfMetagenerationNotMatch sets the optional parameter
// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
// conditional on whether the bucket's current metageneration does not
// match the given value.
func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall {
	c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch
	return c
}

// Projection sets the optional parameter "projection": Set of
// properties to return. 
Defaults to noAcl.\n//\n// Possible values:\n//   \"full\" - Include all properties.\n//   \"noAcl\" - Omit acl and defaultObjectAcl properties.\nfunc (c *BucketsGetCall) Projection(projection string) *BucketsGetCall {\n\tc.opt_[\"projection\"] = projection\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *BucketsGetCall) Do() (*Bucket, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"ifMetagenerationMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"projection\"]; ok {\n\t\tparams.Set(\"projection\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Bucket\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Returns metadata for the specified bucket.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"storage.buckets.get\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\"\n\t//   
],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationMatch\": {\n\t//       \"description\": \"Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationNotMatch\": {\n\t//       \"description\": \"Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projection\": {\n\t//       \"description\": \"Set of properties to return. Defaults to noAcl.\",\n\t//       \"enum\": [\n\t//         \"full\",\n\t//         \"noAcl\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Include all properties.\",\n\t//         \"Omit acl and defaultObjectAcl properties.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}\",\n\t//   \"response\": {\n\t//     \"$ref\": \"Bucket\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_only\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_write\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.buckets.insert\":\n\ntype BucketsInsertCall struct {\n\ts         *Service\n\tprojectid string\n\tbucket    *Bucket\n\topt_      map[string]interface{}\n}\n\n// Insert: Creates a new bucket.\nfunc (r *BucketsService) Insert(projectid string, bucket 
*Bucket) *BucketsInsertCall {\n\tc := &BucketsInsertCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.projectid = projectid\n\tc.bucket = bucket\n\treturn c\n}\n\n// PredefinedAcl sets the optional parameter \"predefinedAcl\": Apply a\n// predefined set of access controls to this bucket.\n//\n// Possible values:\n//   \"authenticatedRead\" - Project team owners get OWNER access, and\n// allAuthenticatedUsers get READER access.\n//   \"private\" - Project team owners get OWNER access.\n//   \"projectPrivate\" - Project team members get access according to\n// their roles.\n//   \"publicRead\" - Project team owners get OWNER access, and allUsers\n// get READER access.\n//   \"publicReadWrite\" - Project team owners get OWNER access, and\n// allUsers get WRITER access.\nfunc (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall {\n\tc.opt_[\"predefinedAcl\"] = predefinedAcl\n\treturn c\n}\n\n// PredefinedDefaultObjectAcl sets the optional parameter\n// \"predefinedDefaultObjectAcl\": Apply a predefined set of default\n// object access controls to this bucket.\n//\n// Possible values:\n//   \"authenticatedRead\" - Object owner gets OWNER access, and\n// allAuthenticatedUsers get READER access.\n//   \"bucketOwnerFullControl\" - Object owner gets OWNER access, and\n// project team owners get OWNER access.\n//   \"bucketOwnerRead\" - Object owner gets OWNER access, and project\n// team owners get READER access.\n//   \"private\" - Object owner gets OWNER access.\n//   \"projectPrivate\" - Object owner gets OWNER access, and project team\n// members get access according to their roles.\n//   \"publicRead\" - Object owner gets OWNER access, and allUsers get\n// READER access.\nfunc (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall {\n\tc.opt_[\"predefinedDefaultObjectAcl\"] = predefinedDefaultObjectAcl\n\treturn c\n}\n\n// Projection sets the optional parameter \"projection\": Set of\n// 
properties to return. Defaults to noAcl, unless the bucket resource\n// specifies acl or defaultObjectAcl properties, when it defaults to\n// full.\n//\n// Possible values:\n//   \"full\" - Include all properties.\n//   \"noAcl\" - Omit acl and defaultObjectAcl properties.\nfunc (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall {\n\tc.opt_[\"projection\"] = projection\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *BucketsInsertCall) Do() (*Bucket, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tparams.Set(\"project\", fmt.Sprintf(\"%v\", c.projectid))\n\tif v, ok := c.opt_[\"predefinedAcl\"]; ok {\n\t\tparams.Set(\"predefinedAcl\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"predefinedDefaultObjectAcl\"]; ok {\n\t\tparams.Set(\"predefinedDefaultObjectAcl\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"projection\"]; ok {\n\t\tparams.Set(\"projection\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.SetOpaque(req.URL)\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret 
*Bucket\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Creates a new bucket.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"storage.buckets.insert\",\n\t//   \"parameterOrder\": [\n\t//     \"project\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"predefinedAcl\": {\n\t//       \"description\": \"Apply a predefined set of access controls to this bucket.\",\n\t//       \"enum\": [\n\t//         \"authenticatedRead\",\n\t//         \"private\",\n\t//         \"projectPrivate\",\n\t//         \"publicRead\",\n\t//         \"publicReadWrite\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Project team owners get OWNER access, and allAuthenticatedUsers get READER access.\",\n\t//         \"Project team owners get OWNER access.\",\n\t//         \"Project team members get access according to their roles.\",\n\t//         \"Project team owners get OWNER access, and allUsers get READER access.\",\n\t//         \"Project team owners get OWNER access, and allUsers get WRITER access.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"predefinedDefaultObjectAcl\": {\n\t//       \"description\": \"Apply a predefined set of default object access controls to this bucket.\",\n\t//       \"enum\": [\n\t//         \"authenticatedRead\",\n\t//         \"bucketOwnerFullControl\",\n\t//         \"bucketOwnerRead\",\n\t//         \"private\",\n\t//         \"projectPrivate\",\n\t//         \"publicRead\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get READER access.\",\n\t//         \"Object owner gets OWNER access.\",\n\t//         
"Object owner gets OWNER access, and project team members get access according to their roles.",
	//         "Object owner gets OWNER access, and allUsers get READER access."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "project": {
	//       "description": "A valid API project identifier.",
	//       "location": "query",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "projection": {
	//       "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.",
	//       "enum": [
	//         "full",
	//         "noAcl"
	//       ],
	//       "enumDescriptions": [
	//         "Include all properties.",
	//         "Omit acl and defaultObjectAcl properties."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b",
	//   "request": {
	//     "$ref": "Bucket"
	//   },
	//   "response": {
	//     "$ref": "Bucket"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}

// method id "storage.buckets.list":

// BucketsListCall holds the state of a pending buckets.list request;
// build it with BucketsService.List, refine it with the setter
// methods, and execute it with Do.
type BucketsListCall struct {
	s         *Service
	projectid string
	opt_      map[string]interface{} // optional URL query parameters, keyed by parameter name
}

// List: Retrieves a list of buckets for a given project.
func (r *BucketsService) List(projectid string) *BucketsListCall {
	c := &BucketsListCall{s: r.s, opt_: make(map[string]interface{})}
	c.projectid = projectid
	return c
}

// MaxResults sets the optional parameter "maxResults": Maximum number
// of buckets to return.
func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall {
	c.opt_["maxResults"] = maxResults
	return c
}

// PageToken sets the optional parameter "pageToken": A
// previously-returned page token representing part of the larger set of
// results to view.
func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall {
	c.opt_["pageToken"] = pageToken
	return c
}

// Prefix sets the optional parameter "prefix": Filter results to
// buckets whose names begin with this prefix.
func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall {
	c.opt_["prefix"] = prefix
	return c
}

// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to noAcl.
//
// Possible values:
//   "full" - Include all properties.
//   "noAcl" - Omit acl and defaultObjectAcl properties.
func (c *BucketsListCall) Projection(projection string) *BucketsListCall {
	c.opt_["projection"] = projection
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

// Do builds and sends the "storage.buckets.list" HTTP request and
// decodes the JSON response into a *Buckets value. The commented JSON
// trailing the method body is the method's REST description.
func (c *BucketsListCall) Do() (*Buckets, error) {
	var body io.Reader = nil
	params := make(url.Values)
	params.Set("alt", "json")
	params.Set("project", fmt.Sprintf("%v", c.projectid))
	if v, ok := c.opt_["maxResults"]; ok {
		params.Set("maxResults", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["pageToken"]; ok {
		params.Set("pageToken", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["prefix"]; ok {
		params.Set("prefix", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["projection"]; ok {
		params.Set("projection", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, "b")
	urls += "?" + params.Encode()
	req, _ := http.NewRequest("GET", urls, body)
	googleapi.SetOpaque(req.URL)
	req.Header.Set("User-Agent", c.s.userAgent())
	res, err := c.s.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	var ret *Buckets
	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Retrieves a list of buckets for a given project.",
	//   "httpMethod": "GET",
	//   "id": "storage.buckets.list",
	//   "parameterOrder": [
	//     "project"
	//   ],
	//   "parameters": {
	//     "maxResults": {
	//       "description": "Maximum number of buckets to return.",
	//       "format": "uint32",
	//       "location": "query",
	//       "minimum": "0",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A previously-returned page token representing part of the larger set of results to view.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "prefix": {
	//       "description": "Filter results to buckets whose names begin with this prefix.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "project": {
	//       "description": "A valid API project identifier.",
	//       "location": "query",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "projection": {
	//       "description": "Set of properties to return. 
Defaults to noAcl.",
	//       "enum": [
	//         "full",
	//         "noAcl"
	//       ],
	//       "enumDescriptions": [
	//         "Include all properties.",
	//         "Omit acl and defaultObjectAcl properties."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b",
	//   "response": {
	//     "$ref": "Buckets"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_only",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}

// method id "storage.buckets.patch":

// BucketsPatchCall holds the state of a pending buckets.patch request;
// build it with BucketsService.Patch, refine it with the setter
// methods, and execute it with Do.
type BucketsPatchCall struct {
	s       *Service
	bucket  string
	bucket2 *Bucket
	opt_    map[string]interface{} // optional URL query parameters, keyed by parameter name
}

// Patch: Updates a bucket. This method supports patch semantics.
func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall {
	c := &BucketsPatchCall{s: r.s, opt_: make(map[string]interface{})}
	c.bucket = bucket
	c.bucket2 = bucket2
	return c
}

// IfMetagenerationMatch sets the optional parameter
// "ifMetagenerationMatch": Makes the return of the bucket metadata
// conditional on whether the bucket's current metageneration matches
// the given value.
func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall {
	c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch
	return c
}

// IfMetagenerationNotMatch sets the optional parameter
// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
// conditional on whether the bucket's current metageneration does not
// match the given value.
func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall {
	c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch
	return c
}

// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
// predefined set of access controls to this bucket.
//
// Possible values:
//   "authenticatedRead" - Project team owners get OWNER access, and
// allAuthenticatedUsers get READER access.
//   "private" - Project team owners get OWNER access.
//   "projectPrivate" - Project team members get access according to
// their roles.
//   "publicRead" - Project team owners get OWNER access, and allUsers
// get READER access.
//   "publicReadWrite" - Project team owners get OWNER access, and
// allUsers get WRITER access.
func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall {
	c.opt_["predefinedAcl"] = predefinedAcl
	return c
}

// PredefinedDefaultObjectAcl sets the optional parameter
// "predefinedDefaultObjectAcl": Apply a predefined set of default
// object access controls to this bucket.
//
// Possible values:
//   "authenticatedRead" - Object owner gets OWNER access, and
// allAuthenticatedUsers get READER access.
//   "bucketOwnerFullControl" - Object owner gets OWNER access, and
// project team owners get OWNER access.
//   "bucketOwnerRead" - Object owner gets OWNER access, and project
// team owners get READER access.
//   "private" - Object owner gets OWNER access.
//   "projectPrivate" - Object owner gets OWNER access, and project team
// members get access according to their roles.
//   "publicRead" - Object owner gets OWNER access, and allUsers get
// READER access.
func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall {
	c.opt_["predefinedDefaultObjectAcl"] = predefinedDefaultObjectAcl
	return c
}

// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to full.
//
// Possible values:
//   "full" - Include all properties.
//   "noAcl" - Omit acl and defaultObjectAcl properties.
func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall {
	c.opt_["projection"] = projection
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

// Do builds and sends the "storage.buckets.patch" HTTP request (PATCH
// with a JSON-encoded Bucket body) and decodes the JSON response into
// a *Bucket value.
func (c *BucketsPatchCall) Do() (*Bucket, error) {
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
	if err != nil {
		return nil, err
	}
	ctype := "application/json"
	params := make(url.Values)
	params.Set("alt", "json")
	if v, ok := c.opt_["ifMetagenerationMatch"]; ok {
		params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok {
		params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["predefinedAcl"]; ok {
		params.Set("predefinedAcl", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["predefinedDefaultObjectAcl"]; ok {
		params.Set("predefinedDefaultObjectAcl", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["projection"]; ok {
		params.Set("projection", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
	urls += "?" + params.Encode()
	req, _ := http.NewRequest("PATCH", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
	})
	req.Header.Set("Content-Type", ctype)
	req.Header.Set("User-Agent", c.s.userAgent())
	res, err := c.s.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer 
googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Bucket\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Updates a bucket. This method supports patch semantics.\",\n\t//   \"httpMethod\": \"PATCH\",\n\t//   \"id\": \"storage.buckets.patch\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationMatch\": {\n\t//       \"description\": \"Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationNotMatch\": {\n\t//       \"description\": \"Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"predefinedAcl\": {\n\t//       \"description\": \"Apply a predefined set of access controls to this bucket.\",\n\t//       \"enum\": [\n\t//         \"authenticatedRead\",\n\t//         \"private\",\n\t//         \"projectPrivate\",\n\t//         \"publicRead\",\n\t//         \"publicReadWrite\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Project team owners get OWNER access, and allAuthenticatedUsers get READER access.\",\n\t//         \"Project team owners get OWNER access.\",\n\t//         \"Project team members get access according to their roles.\",\n\t//         \"Project team owners get OWNER access, and allUsers get 
READER access.\",\n\t//         \"Project team owners get OWNER access, and allUsers get WRITER access.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"predefinedDefaultObjectAcl\": {\n\t//       \"description\": \"Apply a predefined set of default object access controls to this bucket.\",\n\t//       \"enum\": [\n\t//         \"authenticatedRead\",\n\t//         \"bucketOwnerFullControl\",\n\t//         \"bucketOwnerRead\",\n\t//         \"private\",\n\t//         \"projectPrivate\",\n\t//         \"publicRead\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get READER access.\",\n\t//         \"Object owner gets OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n\t//         \"Object owner gets OWNER access, and allUsers get READER access.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projection\": {\n\t//       \"description\": \"Set of properties to return. 
Defaults to full.",
	//       "enum": [
	//         "full",
	//         "noAcl"
	//       ],
	//       "enumDescriptions": [
	//         "Include all properties.",
	//         "Omit acl and defaultObjectAcl properties."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}",
	//   "request": {
	//     "$ref": "Bucket"
	//   },
	//   "response": {
	//     "$ref": "Bucket"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}

// method id "storage.buckets.update":

// BucketsUpdateCall holds the state of a pending buckets.update
// request; build it with BucketsService.Update, refine it with the
// setter methods, and execute it with Do.
type BucketsUpdateCall struct {
	s       *Service
	bucket  string
	bucket2 *Bucket
	opt_    map[string]interface{} // optional URL query parameters, keyed by parameter name
}

// Update: Updates a bucket.
func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall {
	c := &BucketsUpdateCall{s: r.s, opt_: make(map[string]interface{})}
	c.bucket = bucket
	c.bucket2 = bucket2
	return c
}

// IfMetagenerationMatch sets the optional parameter
// "ifMetagenerationMatch": Makes the return of the bucket metadata
// conditional on whether the bucket's current metageneration matches
// the given value.
func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall {
	c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch
	return c
}

// IfMetagenerationNotMatch sets the optional parameter
// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
// conditional on whether the bucket's current metageneration does not
// match the given value.
func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall {
	c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch
	return c
}

// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
// predefined set of access controls to this bucket.
//
// Possible values:
//   "authenticatedRead" - Project team owners get OWNER access, and
// allAuthenticatedUsers get READER access.
//   "private" - Project team owners get OWNER access.
//   "projectPrivate" - Project team members get access according to
// their roles.
//   "publicRead" - Project team owners get OWNER access, and allUsers
// get READER access.
//   "publicReadWrite" - Project team owners get OWNER access, and
// allUsers get WRITER access.
func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall {
	c.opt_["predefinedAcl"] = predefinedAcl
	return c
}

// PredefinedDefaultObjectAcl sets the optional parameter
// "predefinedDefaultObjectAcl": Apply a predefined set of default
// object access controls to this bucket.
//
// Possible values:
//   "authenticatedRead" - Object owner gets OWNER access, and
// allAuthenticatedUsers get READER access.
//   "bucketOwnerFullControl" - Object owner gets OWNER access, and
// project team owners get OWNER access.
//   "bucketOwnerRead" - Object owner gets OWNER access, and project
// team owners get READER access.
//   "private" - Object owner gets OWNER access.
//   "projectPrivate" - Object owner gets OWNER access, and project team
// members get access according to their roles.
//   "publicRead" - Object owner gets OWNER access, and allUsers get
// READER access.
func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall {
	c.opt_["predefinedDefaultObjectAcl"] = predefinedDefaultObjectAcl
	return c
}

// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to full.
//
// Possible values:
//   "full" - Include all properties.
//   "noAcl" - Omit acl and defaultObjectAcl properties.
func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall {
	c.opt_["projection"] = projection
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

// Do builds and sends the "storage.buckets.update" HTTP request (PUT
// with a JSON-encoded Bucket body) and decodes the JSON response into
// a *Bucket value.
func (c *BucketsUpdateCall) Do() (*Bucket, error) {
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
	if err != nil {
		return nil, err
	}
	ctype := "application/json"
	params := make(url.Values)
	params.Set("alt", "json")
	if v, ok := c.opt_["ifMetagenerationMatch"]; ok {
		params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok {
		params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["predefinedAcl"]; ok {
		params.Set("predefinedAcl", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["predefinedDefaultObjectAcl"]; ok {
		params.Set("predefinedDefaultObjectAcl", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["projection"]; ok {
		params.Set("projection", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
	urls += "?" + params.Encode()
	req, _ := http.NewRequest("PUT", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
	})
	req.Header.Set("Content-Type", ctype)
	req.Header.Set("User-Agent", c.s.userAgent())
	res, err := c.s.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer 
googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Bucket\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Updates a bucket.\",\n\t//   \"httpMethod\": \"PUT\",\n\t//   \"id\": \"storage.buckets.update\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationMatch\": {\n\t//       \"description\": \"Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationNotMatch\": {\n\t//       \"description\": \"Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"predefinedAcl\": {\n\t//       \"description\": \"Apply a predefined set of access controls to this bucket.\",\n\t//       \"enum\": [\n\t//         \"authenticatedRead\",\n\t//         \"private\",\n\t//         \"projectPrivate\",\n\t//         \"publicRead\",\n\t//         \"publicReadWrite\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Project team owners get OWNER access, and allAuthenticatedUsers get READER access.\",\n\t//         \"Project team owners get OWNER access.\",\n\t//         \"Project team members get access according to their roles.\",\n\t//         \"Project team owners get OWNER access, and allUsers get READER access.\",\n\t//         \"Project 
team owners get OWNER access, and allUsers get WRITER access.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"predefinedDefaultObjectAcl\": {\n\t//       \"description\": \"Apply a predefined set of default object access controls to this bucket.\",\n\t//       \"enum\": [\n\t//         \"authenticatedRead\",\n\t//         \"bucketOwnerFullControl\",\n\t//         \"bucketOwnerRead\",\n\t//         \"private\",\n\t//         \"projectPrivate\",\n\t//         \"publicRead\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get READER access.\",\n\t//         \"Object owner gets OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n\t//         \"Object owner gets OWNER access, and allUsers get READER access.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projection\": {\n\t//       \"description\": \"Set of properties to return. 
Defaults to full.",
	//       "enum": [
	//         "full",
	//         "noAcl"
	//       ],
	//       "enumDescriptions": [
	//         "Include all properties.",
	//         "Omit acl and defaultObjectAcl properties."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}",
	//   "request": {
	//     "$ref": "Bucket"
	//   },
	//   "response": {
	//     "$ref": "Bucket"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}

// method id "storage.channels.stop":

// ChannelsStopCall holds the state of a pending channels.stop request;
// build it with ChannelsService.Stop and execute it with Do.
type ChannelsStopCall struct {
	s       *Service
	channel *Channel
	opt_    map[string]interface{} // optional URL query parameters, keyed by parameter name
}

// Stop: Stop watching resources through this channel
func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall {
	c := &ChannelsStopCall{s: r.s, opt_: make(map[string]interface{})}
	c.channel = channel
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

// Do builds and sends the "storage.channels.stop" HTTP request (POST
// with a JSON-encoded Channel body); the call returns no payload, so
// only an error is reported.
func (c *ChannelsStopCall) Do() error {
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
	if err != nil {
		return err
	}
	ctype := "application/json"
	params := make(url.Values)
	params.Set("alt", "json")
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop")
	urls += "?" + params.Encode()
	req, _ := http.NewRequest("POST", urls, body)
	googleapi.SetOpaque(req.URL)
	req.Header.Set("Content-Type", ctype)
	req.Header.Set("User-Agent", c.s.userAgent())
	res, err := c.s.client.Do(req)
	if err != nil {
		return err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return err
	}
	return nil
	// {
	//   "description": "Stop watching resources through this channel",
	//   "httpMethod": "POST",
	//   "id": "storage.channels.stop",
	//   "path": "channels/stop",
	//   "request": {
	//     "$ref": "Channel",
	//     "parameterName": "resource"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_only",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}

// method id "storage.defaultObjectAccessControls.delete":

// DefaultObjectAccessControlsDeleteCall holds the state of a pending
// defaultObjectAccessControls.delete request; build it with
// DefaultObjectAccessControlsService.Delete and execute it with Do.
type DefaultObjectAccessControlsDeleteCall struct {
	s      *Service
	bucket string
	entity string
	opt_   map[string]interface{} // optional URL query parameters, keyed by parameter name
}

// Delete: Permanently deletes the default object ACL entry for the
// specified entity on the specified bucket.
func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall {
	c := &DefaultObjectAccessControlsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
	c.bucket = bucket
	c.entity = entity
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

// Do builds and sends the "storage.defaultObjectAccessControls.delete"
// HTTP request; the call returns no payload, so only an error is
// reported.
func (c *DefaultObjectAccessControlsDeleteCall) Do() error {
	var body io.Reader = nil
	params := 
make(url.Values)
	params.Set("alt", "json")
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
	urls += "?" + params.Encode()
	req, _ := http.NewRequest("DELETE", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
		"entity": c.entity,
	})
	req.Header.Set("User-Agent", c.s.userAgent())
	res, err := c.s.client.Do(req)
	if err != nil {
		return err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return err
	}
	return nil
	// {
	//   "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.",
	//   "httpMethod": "DELETE",
	//   "id": "storage.defaultObjectAccessControls.delete",
	//   "parameterOrder": [
	//     "bucket",
	//     "entity"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "entity": {
	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/defaultObjectAcl/{entity}",
	//   "scopes": [
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}

// method id "storage.defaultObjectAccessControls.get":

// DefaultObjectAccessControlsGetCall holds the state of a pending
// defaultObjectAccessControls.get request; build it with
// DefaultObjectAccessControlsService.Get and execute it with Do.
type DefaultObjectAccessControlsGetCall struct {
	s      *Service
	bucket string
	entity string
	opt_   map[string]interface{} // optional URL query parameters, keyed by parameter name
}

// Get: Returns the default object ACL entry for the specified entity on
// the specified bucket.
func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall {
	c := &DefaultObjectAccessControlsGetCall{s: r.s, opt_: make(map[string]interface{})}
	c.bucket = bucket
	c.entity = entity
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

// Do builds and sends the "storage.defaultObjectAccessControls.get"
// HTTP request and decodes the JSON response into a
// *ObjectAccessControl value.
func (c *DefaultObjectAccessControlsGetCall) Do() (*ObjectAccessControl, error) {
	var body io.Reader = nil
	params := make(url.Values)
	params.Set("alt", "json")
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
	urls += "?" + params.Encode()
	req, _ := http.NewRequest("GET", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
		"entity": c.entity,
	})
	req.Header.Set("User-Agent", c.s.userAgent())
	res, err := c.s.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	var ret *ObjectAccessControl
	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Returns the default object ACL entry for the specified entity on the specified bucket.",
	//   "httpMethod": "GET",
	//   "id": "storage.defaultObjectAccessControls.get",
	//   "parameterOrder": [
	//     "bucket",
	//     "entity"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "entity": {
	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/defaultObjectAcl/{entity}",
	//   "response": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}

// method id "storage.defaultObjectAccessControls.insert":

// DefaultObjectAccessControlsInsertCall holds the state of a pending
// defaultObjectAccessControls.insert request; build it with
// DefaultObjectAccessControlsService.Insert and execute it with Do.
type DefaultObjectAccessControlsInsertCall struct {
	s                   *Service
	bucket              string
	objectaccesscontrol *ObjectAccessControl
	opt_                map[string]interface{} // optional URL query parameters, keyed by parameter name
}

// Insert: Creates a new default object ACL entry on the specified
// bucket.
func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall {
	c := &DefaultObjectAccessControlsInsertCall{s: r.s, opt_: make(map[string]interface{})}
	c.bucket = 
bucket
	c.objectaccesscontrol = objectaccesscontrol
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

// Do builds and sends the "storage.defaultObjectAccessControls.insert"
// HTTP request (POST with a JSON-encoded ObjectAccessControl body) and
// decodes the JSON response into a *ObjectAccessControl value.
func (c *DefaultObjectAccessControlsInsertCall) Do() (*ObjectAccessControl, error) {
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
	if err != nil {
		return nil, err
	}
	ctype := "application/json"
	params := make(url.Values)
	params.Set("alt", "json")
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl")
	urls += "?" + params.Encode()
	req, _ := http.NewRequest("POST", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
	})
	req.Header.Set("Content-Type", ctype)
	req.Header.Set("User-Agent", c.s.userAgent())
	res, err := c.s.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	var ret *ObjectAccessControl
	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a new default object ACL entry on the specified bucket.",
	//   "httpMethod": "POST",
	//   "id": "storage.defaultObjectAccessControls.insert",
	//   "parameterOrder": [
	//     "bucket"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/defaultObjectAcl",
	//   "request": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "response": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}

// method id "storage.defaultObjectAccessControls.list":

// DefaultObjectAccessControlsListCall holds the state of a pending
// defaultObjectAccessControls.list request; build it with
// DefaultObjectAccessControlsService.List and execute it with Do.
type DefaultObjectAccessControlsListCall struct {
	s      *Service
	bucket string
	opt_   map[string]interface{} // optional URL query parameters, keyed by parameter name
}

// List: Retrieves default object ACL entries on the specified bucket.
func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall {
	c := &DefaultObjectAccessControlsListCall{s: r.s, opt_: make(map[string]interface{})}
	c.bucket = bucket
	return c
}

// IfMetagenerationMatch sets the optional parameter
// "ifMetagenerationMatch": If present, only return default ACL listing
// if the bucket's current metageneration matches this value.
func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall {
	c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch
	return c
}

// IfMetagenerationNotMatch sets the optional parameter
// "ifMetagenerationNotMatch": If present, only return default ACL
// listing if the bucket's current metageneration does not match the
// given value.
func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall {
	c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall {
	c.opt_["fields"] = 
googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *DefaultObjectAccessControlsListCall) Do() (*ObjectAccessControls, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"ifMetagenerationMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/defaultObjectAcl\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ObjectAccessControls\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Retrieves default object ACL entries on the specified bucket.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"storage.defaultObjectAccessControls.list\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationMatch\": {\n\t//       \"description\": \"If present, only return default ACL listing if the bucket's current metageneration matches this value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     
\"ifMetagenerationNotMatch\": {\n\t//       \"description\": \"If present, only return default ACL listing if the bucket's current metageneration does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/defaultObjectAcl\",\n\t//   \"response\": {\n\t//     \"$ref\": \"ObjectAccessControls\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.defaultObjectAccessControls.patch\":\n\ntype DefaultObjectAccessControlsPatchCall struct {\n\ts                   *Service\n\tbucket              string\n\tentity              string\n\tobjectaccesscontrol *ObjectAccessControl\n\topt_                map[string]interface{}\n}\n\n// Patch: Updates a default object ACL entry on the specified bucket.\n// This method supports patch semantics.\nfunc (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall {\n\tc := &DefaultObjectAccessControlsPatchCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.entity = entity\n\tc.objectaccesscontrol = objectaccesscontrol\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *DefaultObjectAccessControlsPatchCall) Do() (*ObjectAccessControl, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", 
\"json\")\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/defaultObjectAcl/{entity}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"PATCH\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t\t\"entity\": c.entity,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ObjectAccessControl\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Updates a default object ACL entry on the specified bucket. This method supports patch semantics.\",\n\t//   \"httpMethod\": \"PATCH\",\n\t//   \"id\": \"storage.defaultObjectAccessControls.patch\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\",\n\t//     \"entity\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"entity\": {\n\t//       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/defaultObjectAcl/{entity}\",\n\t//   \"request\": {\n\t//     \"$ref\": \"ObjectAccessControl\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"ObjectAccessControl\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.defaultObjectAccessControls.update\":\n\ntype DefaultObjectAccessControlsUpdateCall struct {\n\ts                   *Service\n\tbucket              string\n\tentity              string\n\tobjectaccesscontrol *ObjectAccessControl\n\topt_                map[string]interface{}\n}\n\n// Update: Updates a default object ACL entry on the specified bucket.\nfunc (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall {\n\tc := &DefaultObjectAccessControlsUpdateCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.entity = entity\n\tc.objectaccesscontrol = objectaccesscontrol\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *DefaultObjectAccessControlsUpdateCall) Do() (*ObjectAccessControl, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok 
:= c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/defaultObjectAcl/{entity}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"PUT\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t\t\"entity\": c.entity,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ObjectAccessControl\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Updates a default object ACL entry on the specified bucket.\",\n\t//   \"httpMethod\": \"PUT\",\n\t//   \"id\": \"storage.defaultObjectAccessControls.update\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\",\n\t//     \"entity\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"entity\": {\n\t//       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/defaultObjectAcl/{entity}\",\n\t//   \"request\": {\n\t//     \"$ref\": \"ObjectAccessControl\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"ObjectAccessControl\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.objectAccessControls.delete\":\n\ntype ObjectAccessControlsDeleteCall struct {\n\ts      *Service\n\tbucket string\n\tobject string\n\tentity string\n\topt_   map[string]interface{}\n}\n\n// Delete: Permanently deletes the ACL entry for the specified entity on\n// the specified object.\nfunc (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall {\n\tc := &ObjectAccessControlsDeleteCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.object = object\n\tc.entity = entity\n\treturn c\n}\n\n// Generation sets the optional parameter \"generation\": If present,\n// selects a specific revision of this object (as opposed to the latest\n// version, the default).\nfunc (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall {\n\tc.opt_[\"generation\"] = generation\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ObjectAccessControlsDeleteCall) Do() error {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := 
c.opt_[\"generation\"]; ok {\n\t\tparams.Set(\"generation\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/o/{object}/acl/{entity}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"DELETE\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t\t\"object\": c.object,\n\t\t\"entity\": c.entity,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\t// {\n\t//   \"description\": \"Permanently deletes the ACL entry for the specified entity on the specified object.\",\n\t//   \"httpMethod\": \"DELETE\",\n\t//   \"id\": \"storage.objectAccessControls.delete\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\",\n\t//     \"object\",\n\t//     \"entity\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"entity\": {\n\t//       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"generation\": {\n\t//       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"object\": {\n\t//       \"description\": \"Name of the object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/o/{object}/acl/{entity}\",\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.objectAccessControls.get\":\n\ntype ObjectAccessControlsGetCall struct {\n\ts      *Service\n\tbucket string\n\tobject string\n\tentity string\n\topt_   map[string]interface{}\n}\n\n// Get: Returns the ACL entry for the specified entity on the specified\n// object.\nfunc (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall {\n\tc := &ObjectAccessControlsGetCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.object = object\n\tc.entity = entity\n\treturn c\n}\n\n// Generation sets the optional parameter \"generation\": If present,\n// selects a specific revision of this object (as opposed to the latest\n// version, the default).\nfunc (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall {\n\tc.opt_[\"generation\"] = generation\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ObjectAccessControlsGetCall) Fields(s 
...googleapi.Field) *ObjectAccessControlsGetCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ObjectAccessControlsGetCall) Do() (*ObjectAccessControl, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"generation\"]; ok {\n\t\tparams.Set(\"generation\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/o/{object}/acl/{entity}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t\t\"object\": c.object,\n\t\t\"entity\": c.entity,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ObjectAccessControl\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Returns the ACL entry for the specified entity on the specified object.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"storage.objectAccessControls.get\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\",\n\t//     \"object\",\n\t//     \"entity\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"entity\": {\n\t//       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"generation\": {\n\t//       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"object\": {\n\t//       \"description\": \"Name of the object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/o/{object}/acl/{entity}\",\n\t//   \"response\": {\n\t//     \"$ref\": \"ObjectAccessControl\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.objectAccessControls.insert\":\n\ntype ObjectAccessControlsInsertCall struct {\n\ts                   *Service\n\tbucket              string\n\tobject              string\n\tobjectaccesscontrol *ObjectAccessControl\n\topt_                map[string]interface{}\n}\n\n// Insert: Creates a new ACL entry on the specified object.\nfunc (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall {\n\tc := &ObjectAccessControlsInsertCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.object = object\n\tc.objectaccesscontrol = objectaccesscontrol\n\treturn c\n}\n\n// Generation sets the optional parameter \"generation\": If present,\n// selects a specific revision of this object (as opposed to the latest\n// version, the default).\nfunc (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall {\n\tc.opt_[\"generation\"] = generation\n\treturn c\n}\n\n// 
Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ObjectAccessControlsInsertCall) Do() (*ObjectAccessControl, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"generation\"]; ok {\n\t\tparams.Set(\"generation\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/o/{object}/acl\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t\t\"object\": c.object,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ObjectAccessControl\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Creates a new ACL entry on the specified object.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"storage.objectAccessControls.insert\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\",\n\t//     \"object\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//  
     \"type\": \"string\"\n\t//     },\n\t//     \"generation\": {\n\t//       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"object\": {\n\t//       \"description\": \"Name of the object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/o/{object}/acl\",\n\t//   \"request\": {\n\t//     \"$ref\": \"ObjectAccessControl\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"ObjectAccessControl\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.objectAccessControls.list\":\n\ntype ObjectAccessControlsListCall struct {\n\ts      *Service\n\tbucket string\n\tobject string\n\topt_   map[string]interface{}\n}\n\n// List: Retrieves ACL entries on the specified object.\nfunc (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall {\n\tc := &ObjectAccessControlsListCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.object = object\n\treturn c\n}\n\n// Generation sets the optional parameter \"generation\": If present,\n// selects a specific revision of this object (as opposed to the latest\n// version, the default).\nfunc (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall {\n\tc.opt_[\"generation\"] = generation\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc 
(c *ObjectAccessControlsListCall) Do() (*ObjectAccessControls, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"generation\"]; ok {\n\t\tparams.Set(\"generation\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/o/{object}/acl\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t\t\"object\": c.object,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ObjectAccessControls\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Retrieves ACL entries on the specified object.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"storage.objectAccessControls.list\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\",\n\t//     \"object\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"generation\": {\n\t//       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"object\": {\n\t//       \"description\": \"Name of the object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   
\"path\": \"b/{bucket}/o/{object}/acl\",\n\t//   \"response\": {\n\t//     \"$ref\": \"ObjectAccessControls\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.objectAccessControls.patch\":\n\ntype ObjectAccessControlsPatchCall struct {\n\ts                   *Service\n\tbucket              string\n\tobject              string\n\tentity              string\n\tobjectaccesscontrol *ObjectAccessControl\n\topt_                map[string]interface{}\n}\n\n// Patch: Updates an ACL entry on the specified object. This method\n// supports patch semantics.\nfunc (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall {\n\tc := &ObjectAccessControlsPatchCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.object = object\n\tc.entity = entity\n\tc.objectaccesscontrol = objectaccesscontrol\n\treturn c\n}\n\n// Generation sets the optional parameter \"generation\": If present,\n// selects a specific revision of this object (as opposed to the latest\n// version, the default).\nfunc (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall {\n\tc.opt_[\"generation\"] = generation\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ObjectAccessControlsPatchCall) Do() (*ObjectAccessControl, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := 
make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"generation\"]; ok {\n\t\tparams.Set(\"generation\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/o/{object}/acl/{entity}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"PATCH\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t\t\"object\": c.object,\n\t\t\"entity\": c.entity,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ObjectAccessControl\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Updates an ACL entry on the specified object. This method supports patch semantics.\",\n\t//   \"httpMethod\": \"PATCH\",\n\t//   \"id\": \"storage.objectAccessControls.patch\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\",\n\t//     \"object\",\n\t//     \"entity\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"entity\": {\n\t//       \"description\": \"The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"generation\": {\n\t//       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"object\": {\n\t//       \"description\": \"Name of the object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/o/{object}/acl/{entity}\",\n\t//   \"request\": {\n\t//     \"$ref\": \"ObjectAccessControl\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"ObjectAccessControl\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.objectAccessControls.update\":\n\ntype ObjectAccessControlsUpdateCall struct {\n\ts                   *Service\n\tbucket              string\n\tobject              string\n\tentity              string\n\tobjectaccesscontrol *ObjectAccessControl\n\topt_                map[string]interface{}\n}\n\n// Update: Updates an ACL entry on the specified object.\nfunc (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall {\n\tc := &ObjectAccessControlsUpdateCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.object = object\n\tc.entity = entity\n\tc.objectaccesscontrol = objectaccesscontrol\n\treturn c\n}\n\n// Generation sets the optional parameter \"generation\": If present,\n// selects a specific revision of this object (as opposed to the latest\n// version, the default).\nfunc (c 
*ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall {\n\tc.opt_[\"generation\"] = generation\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ObjectAccessControlsUpdateCall) Do() (*ObjectAccessControl, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"generation\"]; ok {\n\t\tparams.Set(\"generation\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/o/{object}/acl/{entity}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"PUT\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t\t\"object\": c.object,\n\t\t\"entity\": c.entity,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *ObjectAccessControl\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Updates an ACL entry on the specified object.\",\n\t//   \"httpMethod\": \"PUT\",\n\t//   \"id\": \"storage.objectAccessControls.update\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\",\n\t//     
\"object\",\n\t//     \"entity\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of a bucket.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"entity\": {\n\t//       \"description\": \"The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"generation\": {\n\t//       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"object\": {\n\t//       \"description\": \"Name of the object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/o/{object}/acl/{entity}\",\n\t//   \"request\": {\n\t//     \"$ref\": \"ObjectAccessControl\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"ObjectAccessControl\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.objects.compose\":\n\ntype ObjectsComposeCall struct {\n\ts                 *Service\n\tdestinationBucket string\n\tdestinationObject string\n\tcomposerequest    *ComposeRequest\n\topt_              map[string]interface{}\n}\n\n// Compose: Concatenates a list of existing objects into a new object in\n// the same bucket.\nfunc (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall {\n\tc := &ObjectsComposeCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.destinationBucket = 
destinationBucket\n\tc.destinationObject = destinationObject\n\tc.composerequest = composerequest\n\treturn c\n}\n\n// DestinationPredefinedAcl sets the optional parameter\n// \"destinationPredefinedAcl\": Apply a predefined set of access controls\n// to the destination object.\n//\n// Possible values:\n//   \"authenticatedRead\" - Object owner gets OWNER access, and\n// allAuthenticatedUsers get READER access.\n//   \"bucketOwnerFullControl\" - Object owner gets OWNER access, and\n// project team owners get OWNER access.\n//   \"bucketOwnerRead\" - Object owner gets OWNER access, and project\n// team owners get READER access.\n//   \"private\" - Object owner gets OWNER access.\n//   \"projectPrivate\" - Object owner gets OWNER access, and project team\n// members get access according to their roles.\n//   \"publicRead\" - Object owner gets OWNER access, and allUsers get\n// READER access.\nfunc (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall {\n\tc.opt_[\"destinationPredefinedAcl\"] = destinationPredefinedAcl\n\treturn c\n}\n\n// IfGenerationMatch sets the optional parameter \"ifGenerationMatch\":\n// Makes the operation conditional on whether the object's current\n// generation matches the given value.\nfunc (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall {\n\tc.opt_[\"ifGenerationMatch\"] = ifGenerationMatch\n\treturn c\n}\n\n// IfMetagenerationMatch sets the optional parameter\n// \"ifMetagenerationMatch\": Makes the operation conditional on whether\n// the object's current metageneration matches the given value.\nfunc (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall {\n\tc.opt_[\"ifMetagenerationMatch\"] = ifMetagenerationMatch\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c 
*ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ObjectsComposeCall) Do() (*Object, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"destinationPredefinedAcl\"]; ok {\n\t\tparams.Set(\"destinationPredefinedAcl\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifGenerationMatch\"]; ok {\n\t\tparams.Set(\"ifGenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{destinationBucket}/o/{destinationObject}/compose\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"destinationBucket\": c.destinationBucket,\n\t\t\"destinationObject\": c.destinationObject,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Object\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Concatenates a list of existing objects into a new object in the same bucket.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"storage.objects.compose\",\n\t//   \"parameterOrder\": [\n\t//     \"destinationBucket\",\n\t//     \"destinationObject\"\n\t//   ],\n\t//   
\"parameters\": {\n\t//     \"destinationBucket\": {\n\t//       \"description\": \"Name of the bucket in which to store the new object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"destinationObject\": {\n\t//       \"description\": \"Name of the new object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"destinationPredefinedAcl\": {\n\t//       \"description\": \"Apply a predefined set of access controls to the destination object.\",\n\t//       \"enum\": [\n\t//         \"authenticatedRead\",\n\t//         \"bucketOwnerFullControl\",\n\t//         \"bucketOwnerRead\",\n\t//         \"private\",\n\t//         \"projectPrivate\",\n\t//         \"publicRead\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get READER access.\",\n\t//         \"Object owner gets OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n\t//         \"Object owner gets OWNER access, and allUsers get READER access.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifGenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current generation matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current metageneration matches the given value.\",\n\t//       \"format\": 
\"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{destinationBucket}/o/{destinationObject}/compose\",\n\t//   \"request\": {\n\t//     \"$ref\": \"ComposeRequest\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Object\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_write\"\n\t//   ],\n\t//   \"supportsMediaDownload\": true\n\t// }\n\n}\n\n// method id \"storage.objects.copy\":\n\ntype ObjectsCopyCall struct {\n\ts                 *Service\n\tsourceBucket      string\n\tsourceObject      string\n\tdestinationBucket string\n\tdestinationObject string\n\tobject            *Object\n\topt_              map[string]interface{}\n}\n\n// Copy: Copies a source object to a destination object. Optionally\n// overrides metadata.\nfunc (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall {\n\tc := &ObjectsCopyCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.sourceBucket = sourceBucket\n\tc.sourceObject = sourceObject\n\tc.destinationBucket = destinationBucket\n\tc.destinationObject = destinationObject\n\tc.object = object\n\treturn c\n}\n\n// DestinationPredefinedAcl sets the optional parameter\n// \"destinationPredefinedAcl\": Apply a predefined set of access controls\n// to the destination object.\n//\n// Possible values:\n//   \"authenticatedRead\" - Object owner gets OWNER access, and\n// allAuthenticatedUsers get READER access.\n//   \"bucketOwnerFullControl\" - Object owner gets OWNER access, and\n// project team owners get OWNER access.\n//   \"bucketOwnerRead\" - Object owner gets OWNER access, and project\n// team owners get READER access.\n//   \"private\" - Object owner gets OWNER access.\n//   
\"projectPrivate\" - Object owner gets OWNER access, and project team\n// members get access according to their roles.\n//   \"publicRead\" - Object owner gets OWNER access, and allUsers get\n// READER access.\nfunc (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall {\n\tc.opt_[\"destinationPredefinedAcl\"] = destinationPredefinedAcl\n\treturn c\n}\n\n// IfGenerationMatch sets the optional parameter \"ifGenerationMatch\":\n// Makes the operation conditional on whether the destination object's\n// current generation matches the given value.\nfunc (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall {\n\tc.opt_[\"ifGenerationMatch\"] = ifGenerationMatch\n\treturn c\n}\n\n// IfGenerationNotMatch sets the optional parameter\n// \"ifGenerationNotMatch\": Makes the operation conditional on whether\n// the destination object's current generation does not match the given\n// value.\nfunc (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall {\n\tc.opt_[\"ifGenerationNotMatch\"] = ifGenerationNotMatch\n\treturn c\n}\n\n// IfMetagenerationMatch sets the optional parameter\n// \"ifMetagenerationMatch\": Makes the operation conditional on whether\n// the destination object's current metageneration matches the given\n// value.\nfunc (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall {\n\tc.opt_[\"ifMetagenerationMatch\"] = ifMetagenerationMatch\n\treturn c\n}\n\n// IfMetagenerationNotMatch sets the optional parameter\n// \"ifMetagenerationNotMatch\": Makes the operation conditional on\n// whether the destination object's current metageneration does not\n// match the given value.\nfunc (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall {\n\tc.opt_[\"ifMetagenerationNotMatch\"] = ifMetagenerationNotMatch\n\treturn c\n}\n\n// IfSourceGenerationMatch sets the optional parameter\n// 
\"ifSourceGenerationMatch\": Makes the operation conditional on whether\n// the source object's generation matches the given value.\nfunc (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall {\n\tc.opt_[\"ifSourceGenerationMatch\"] = ifSourceGenerationMatch\n\treturn c\n}\n\n// IfSourceGenerationNotMatch sets the optional parameter\n// \"ifSourceGenerationNotMatch\": Makes the operation conditional on\n// whether the source object's generation does not match the given\n// value.\nfunc (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall {\n\tc.opt_[\"ifSourceGenerationNotMatch\"] = ifSourceGenerationNotMatch\n\treturn c\n}\n\n// IfSourceMetagenerationMatch sets the optional parameter\n// \"ifSourceMetagenerationMatch\": Makes the operation conditional on\n// whether the source object's current metageneration matches the given\n// value.\nfunc (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall {\n\tc.opt_[\"ifSourceMetagenerationMatch\"] = ifSourceMetagenerationMatch\n\treturn c\n}\n\n// IfSourceMetagenerationNotMatch sets the optional parameter\n// \"ifSourceMetagenerationNotMatch\": Makes the operation conditional on\n// whether the source object's current metageneration does not match the\n// given value.\nfunc (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall {\n\tc.opt_[\"ifSourceMetagenerationNotMatch\"] = ifSourceMetagenerationNotMatch\n\treturn c\n}\n\n// Projection sets the optional parameter \"projection\": Set of\n// properties to return. 
Defaults to noAcl, unless the object resource\n// specifies the acl property, when it defaults to full.\n//\n// Possible values:\n//   \"full\" - Include all properties.\n//   \"noAcl\" - Omit the acl property.\nfunc (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall {\n\tc.opt_[\"projection\"] = projection\n\treturn c\n}\n\n// SourceGeneration sets the optional parameter \"sourceGeneration\": If\n// present, selects a specific revision of the source object (as opposed\n// to the latest version, the default).\nfunc (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall {\n\tc.opt_[\"sourceGeneration\"] = sourceGeneration\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ObjectsCopyCall) Do() (*Object, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.object)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"destinationPredefinedAcl\"]; ok {\n\t\tparams.Set(\"destinationPredefinedAcl\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifGenerationMatch\"]; ok {\n\t\tparams.Set(\"ifGenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifGenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifGenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifSourceGenerationMatch\"]; ok 
{\n\t\tparams.Set(\"ifSourceGenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifSourceGenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifSourceGenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifSourceMetagenerationMatch\"]; ok {\n\t\tparams.Set(\"ifSourceMetagenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifSourceMetagenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifSourceMetagenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"projection\"]; ok {\n\t\tparams.Set(\"projection\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"sourceGeneration\"]; ok {\n\t\tparams.Set(\"sourceGeneration\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"sourceBucket\":      c.sourceBucket,\n\t\t\"sourceObject\":      c.sourceObject,\n\t\t\"destinationBucket\": c.destinationBucket,\n\t\t\"destinationObject\": c.destinationObject,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Object\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Copies a source object to a destination object. 
Optionally overrides metadata.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"storage.objects.copy\",\n\t//   \"parameterOrder\": [\n\t//     \"sourceBucket\",\n\t//     \"sourceObject\",\n\t//     \"destinationBucket\",\n\t//     \"destinationObject\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"destinationBucket\": {\n\t//       \"description\": \"Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"destinationObject\": {\n\t//       \"description\": \"Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"destinationPredefinedAcl\": {\n\t//       \"description\": \"Apply a predefined set of access controls to the destination object.\",\n\t//       \"enum\": [\n\t//         \"authenticatedRead\",\n\t//         \"bucketOwnerFullControl\",\n\t//         \"bucketOwnerRead\",\n\t//         \"private\",\n\t//         \"projectPrivate\",\n\t//         \"publicRead\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get READER access.\",\n\t//         \"Object owner gets OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n\t//         \"Object owner gets OWNER access, and allUsers get READER access.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     
\"ifGenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the destination object's current generation matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifGenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the destination object's current generation does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the destination object's current metageneration matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the destination object's current metageneration does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifSourceGenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the source object's generation matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifSourceGenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the source object's generation does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifSourceMetagenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the source object's current metageneration matches the given 
value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifSourceMetagenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the source object's current metageneration does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projection\": {\n\t//       \"description\": \"Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.\",\n\t//       \"enum\": [\n\t//         \"full\",\n\t//         \"noAcl\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Include all properties.\",\n\t//         \"Omit the acl property.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"sourceBucket\": {\n\t//       \"description\": \"Name of the bucket in which to find the source object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"sourceGeneration\": {\n\t//       \"description\": \"If present, selects a specific revision of the source object (as opposed to the latest version, the default).\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"sourceObject\": {\n\t//       \"description\": \"Name of the source object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}\",\n\t//   \"request\": {\n\t//     \"$ref\": \"Object\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Object\"\n\t//   },\n\t//   \"scopes\": [\n\t//     
\"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_write\"\n\t//   ],\n\t//   \"supportsMediaDownload\": true\n\t// }\n\n}\n\n// method id \"storage.objects.delete\":\n\ntype ObjectsDeleteCall struct {\n\ts      *Service\n\tbucket string\n\tobject string\n\topt_   map[string]interface{}\n}\n\n// Delete: Deletes an object and its metadata. Deletions are permanent\n// if versioning is not enabled for the bucket, or if the generation\n// parameter is used.\nfunc (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall {\n\tc := &ObjectsDeleteCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.object = object\n\treturn c\n}\n\n// Generation sets the optional parameter \"generation\": If present,\n// permanently deletes a specific revision of this object (as opposed to\n// the latest version, the default).\nfunc (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall {\n\tc.opt_[\"generation\"] = generation\n\treturn c\n}\n\n// IfGenerationMatch sets the optional parameter \"ifGenerationMatch\":\n// Makes the operation conditional on whether the object's current\n// generation matches the given value.\nfunc (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall {\n\tc.opt_[\"ifGenerationMatch\"] = ifGenerationMatch\n\treturn c\n}\n\n// IfGenerationNotMatch sets the optional parameter\n// \"ifGenerationNotMatch\": Makes the operation conditional on whether\n// the object's current generation does not match the given value.\nfunc (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall {\n\tc.opt_[\"ifGenerationNotMatch\"] = ifGenerationNotMatch\n\treturn c\n}\n\n// IfMetagenerationMatch sets the optional parameter\n// \"ifMetagenerationMatch\": Makes the operation conditional on whether\n// the object's current 
metageneration matches the given value.\nfunc (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall {\n\tc.opt_[\"ifMetagenerationMatch\"] = ifMetagenerationMatch\n\treturn c\n}\n\n// IfMetagenerationNotMatch sets the optional parameter\n// \"ifMetagenerationNotMatch\": Makes the operation conditional on\n// whether the object's current metageneration does not match the given\n// value.\nfunc (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall {\n\tc.opt_[\"ifMetagenerationNotMatch\"] = ifMetagenerationNotMatch\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ObjectsDeleteCall) Do() error {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"generation\"]; ok {\n\t\tparams.Set(\"generation\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifGenerationMatch\"]; ok {\n\t\tparams.Set(\"ifGenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifGenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifGenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/o/{object}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"DELETE\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": 
c.bucket,\n\t\t\"object\": c.object,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\t// {\n\t//   \"description\": \"Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.\",\n\t//   \"httpMethod\": \"DELETE\",\n\t//   \"id\": \"storage.objects.delete\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\",\n\t//     \"object\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of the bucket in which the object resides.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"generation\": {\n\t//       \"description\": \"If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifGenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current generation matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifGenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current generation does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current metageneration matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       
\"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current metageneration does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"object\": {\n\t//       \"description\": \"Name of the object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/o/{object}\",\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_write\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.objects.get\":\n\ntype ObjectsGetCall struct {\n\ts      *Service\n\tbucket string\n\tobject string\n\topt_   map[string]interface{}\n}\n\n// Get: Retrieves an object or its metadata.\nfunc (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall {\n\tc := &ObjectsGetCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.object = object\n\treturn c\n}\n\n// Generation sets the optional parameter \"generation\": If present,\n// selects a specific revision of this object (as opposed to the latest\n// version, the default).\nfunc (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall {\n\tc.opt_[\"generation\"] = generation\n\treturn c\n}\n\n// IfGenerationMatch sets the optional parameter \"ifGenerationMatch\":\n// Makes the operation conditional on whether the object's generation\n// matches the given value.\nfunc (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall {\n\tc.opt_[\"ifGenerationMatch\"] = ifGenerationMatch\n\treturn c\n}\n\n// IfGenerationNotMatch sets the optional parameter\n// \"ifGenerationNotMatch\": 
Makes the operation conditional on whether\n// the object's generation does not match the given value.\nfunc (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall {\n\tc.opt_[\"ifGenerationNotMatch\"] = ifGenerationNotMatch\n\treturn c\n}\n\n// IfMetagenerationMatch sets the optional parameter\n// \"ifMetagenerationMatch\": Makes the operation conditional on whether\n// the object's current metageneration matches the given value.\nfunc (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall {\n\tc.opt_[\"ifMetagenerationMatch\"] = ifMetagenerationMatch\n\treturn c\n}\n\n// IfMetagenerationNotMatch sets the optional parameter\n// \"ifMetagenerationNotMatch\": Makes the operation conditional on\n// whether the object's current metageneration does not match the given\n// value.\nfunc (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall {\n\tc.opt_[\"ifMetagenerationNotMatch\"] = ifMetagenerationNotMatch\n\treturn c\n}\n\n// Projection sets the optional parameter \"projection\": Set of\n// properties to return. 
Defaults to noAcl.\n//\n// Possible values:\n//   \"full\" - Include all properties.\n//   \"noAcl\" - Omit the acl property.\nfunc (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall {\n\tc.opt_[\"projection\"] = projection\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ObjectsGetCall) Do() (*Object, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"generation\"]; ok {\n\t\tparams.Set(\"generation\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifGenerationMatch\"]; ok {\n\t\tparams.Set(\"ifGenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifGenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifGenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"projection\"]; ok {\n\t\tparams.Set(\"projection\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/o/{object}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t\t\"object\": c.object,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil 
{\n\t\treturn nil, err\n\t}\n\tvar ret *Object\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Retrieves an object or its metadata.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"storage.objects.get\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\",\n\t//     \"object\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of the bucket in which the object resides.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"generation\": {\n\t//       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifGenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's generation matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifGenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's generation does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current metageneration matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current metageneration does not match the given value.\",\n\t//       \"format\": 
\"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"object\": {\n\t//       \"description\": \"Name of the object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projection\": {\n\t//       \"description\": \"Set of properties to return. Defaults to noAcl.\",\n\t//       \"enum\": [\n\t//         \"full\",\n\t//         \"noAcl\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Include all properties.\",\n\t//         \"Omit the acl property.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/o/{object}\",\n\t//   \"response\": {\n\t//     \"$ref\": \"Object\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_only\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_write\"\n\t//   ],\n\t//   \"supportsMediaDownload\": true\n\t// }\n\n}\n\n// method id \"storage.objects.insert\":\n\ntype ObjectsInsertCall struct {\n\ts          *Service\n\tbucket     string\n\tobject     *Object\n\topt_       map[string]interface{}\n\tmedia_     io.Reader\n\tresumable_ googleapi.SizeReaderAt\n\tmediaType_ string\n\tctx_       context.Context\n\tprotocol_  string\n}\n\n// Insert: Stores a new object and metadata.\nfunc (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall {\n\tc := &ObjectsInsertCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.object = object\n\treturn c\n}\n\n// ContentEncoding sets the optional parameter \"contentEncoding\": If\n// set, sets the contentEncoding property of the final object to this\n// value. 
Setting this parameter is equivalent to setting the\n// contentEncoding metadata property. This can be useful when uploading\n// an object with uploadType=media to indicate the encoding of the\n// content being uploaded.\nfunc (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall {\n\tc.opt_[\"contentEncoding\"] = contentEncoding\n\treturn c\n}\n\n// IfGenerationMatch sets the optional parameter \"ifGenerationMatch\":\n// Makes the operation conditional on whether the object's current\n// generation matches the given value.\nfunc (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall {\n\tc.opt_[\"ifGenerationMatch\"] = ifGenerationMatch\n\treturn c\n}\n\n// IfGenerationNotMatch sets the optional parameter\n// \"ifGenerationNotMatch\": Makes the operation conditional on whether\n// the object's current generation does not match the given value.\nfunc (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall {\n\tc.opt_[\"ifGenerationNotMatch\"] = ifGenerationNotMatch\n\treturn c\n}\n\n// IfMetagenerationMatch sets the optional parameter\n// \"ifMetagenerationMatch\": Makes the operation conditional on whether\n// the object's current metageneration matches the given value.\nfunc (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall {\n\tc.opt_[\"ifMetagenerationMatch\"] = ifMetagenerationMatch\n\treturn c\n}\n\n// IfMetagenerationNotMatch sets the optional parameter\n// \"ifMetagenerationNotMatch\": Makes the operation conditional on\n// whether the object's current metageneration does not match the given\n// value.\nfunc (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall {\n\tc.opt_[\"ifMetagenerationNotMatch\"] = ifMetagenerationNotMatch\n\treturn c\n}\n\n// Name sets the optional parameter \"name\": Name of the object. Required\n// when the object metadata is not otherwise provided. 
Overrides the\n// object metadata's name value, if any.\nfunc (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall {\n\tc.opt_[\"name\"] = name\n\treturn c\n}\n\n// PredefinedAcl sets the optional parameter \"predefinedAcl\": Apply a\n// predefined set of access controls to this object.\n//\n// Possible values:\n//   \"authenticatedRead\" - Object owner gets OWNER access, and\n// allAuthenticatedUsers get READER access.\n//   \"bucketOwnerFullControl\" - Object owner gets OWNER access, and\n// project team owners get OWNER access.\n//   \"bucketOwnerRead\" - Object owner gets OWNER access, and project\n// team owners get READER access.\n//   \"private\" - Object owner gets OWNER access.\n//   \"projectPrivate\" - Object owner gets OWNER access, and project team\n// members get access according to their roles.\n//   \"publicRead\" - Object owner gets OWNER access, and allUsers get\n// READER access.\nfunc (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall {\n\tc.opt_[\"predefinedAcl\"] = predefinedAcl\n\treturn c\n}\n\n// Projection sets the optional parameter \"projection\": Set of\n// properties to return. 
Defaults to noAcl, unless the object resource\n// specifies the acl property, when it defaults to full.\n//\n// Possible values:\n//   \"full\" - Include all properties.\n//   \"noAcl\" - Omit the acl property.\nfunc (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall {\n\tc.opt_[\"projection\"] = projection\n\treturn c\n}\n\n// Media specifies the media to upload in a single chunk.\n// At most one of Media and ResumableMedia may be set.\nfunc (c *ObjectsInsertCall) Media(r io.Reader) *ObjectsInsertCall {\n\tc.media_ = r\n\tc.protocol_ = \"multipart\"\n\treturn c\n}\n\n// ResumableMedia specifies the media to upload in chunks and can be cancelled with ctx.\n// At most one of Media and ResumableMedia may be set.\n// mediaType identifies the MIME media type of the upload, such as \"image/png\".\n// If mediaType is \"\", it will be auto-detected.\nfunc (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall {\n\tc.ctx_ = ctx\n\tc.resumable_ = io.NewSectionReader(r, 0, size)\n\tc.mediaType_ = mediaType\n\tc.protocol_ = \"resumable\"\n\treturn c\n}\n\n// ProgressUpdater provides a callback function that will be called after every chunk.\n// It should be a low-latency function in order to not slow down the upload operation.\n// This should only be called when using ResumableMedia (as opposed to Media).\nfunc (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall {\n\tc.opt_[\"progressUpdater\"] = pu\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ObjectsInsertCall) Do() (*Object, error) {\n\tvar body io.Reader = nil\n\tbody, err := 
googleapi.WithoutDataWrapper.JSONReader(c.object)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"contentEncoding\"]; ok {\n\t\tparams.Set(\"contentEncoding\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifGenerationMatch\"]; ok {\n\t\tparams.Set(\"ifGenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifGenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifGenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"name\"]; ok {\n\t\tparams.Set(\"name\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"predefinedAcl\"]; ok {\n\t\tparams.Set(\"predefinedAcl\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"projection\"]; ok {\n\t\tparams.Set(\"projection\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/o\")\n\tvar progressUpdater_ googleapi.ProgressUpdater\n\tif v, ok := c.opt_[\"progressUpdater\"]; ok {\n\t\tif pu, ok := v.(googleapi.ProgressUpdater); ok {\n\t\t\tprogressUpdater_ = pu\n\t\t}\n\t}\n\tif c.media_ != nil || c.resumable_ != nil {\n\t\turls = strings.Replace(urls, \"https://www.googleapis.com/\", \"https://www.googleapis.com/upload/\", 1)\n\t\tparams.Set(\"uploadType\", c.protocol_)\n\t}\n\turls += \"?\" + params.Encode()\n\tif c.protocol_ != \"resumable\" {\n\t\tvar cancel func()\n\t\tcancel, _ = googleapi.ConditionallyIncludeMedia(c.media_, &body, &ctype)\n\t\tif cancel != nil {\n\t\t\tdefer cancel()\n\t\t}\n\t}\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, 
map[string]string{\n\t\t\"bucket\": c.bucket,\n\t})\n\tif c.protocol_ == \"resumable\" {\n\t\treq.ContentLength = 0\n\t\tif c.mediaType_ == \"\" {\n\t\t\tc.mediaType_ = googleapi.DetectMediaType(c.resumable_)\n\t\t}\n\t\treq.Header.Set(\"X-Upload-Content-Type\", c.mediaType_)\n\t\treq.Body = nil\n\t} else {\n\t\treq.Header.Set(\"Content-Type\", ctype)\n\t}\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tif c.protocol_ == \"resumable\" {\n\t\tloc := res.Header.Get(\"Location\")\n\t\trx := &googleapi.ResumableUpload{\n\t\t\tClient:        c.s.client,\n\t\t\tUserAgent:     c.s.userAgent(),\n\t\t\tURI:           loc,\n\t\t\tMedia:         c.resumable_,\n\t\t\tMediaType:     c.mediaType_,\n\t\t\tContentLength: c.resumable_.Size(),\n\t\t\tCallback:      progressUpdater_,\n\t\t}\n\t\tres, err = rx.Upload(c.ctx_)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer res.Body.Close()\n\t}\n\tvar ret *Object\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Stores a new object and metadata.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"storage.objects.insert\",\n\t//   \"mediaUpload\": {\n\t//     \"accept\": [\n\t//       \"*/*\"\n\t//     ],\n\t//     \"protocols\": {\n\t//       \"resumable\": {\n\t//         \"multipart\": true,\n\t//         \"path\": \"/resumable/upload/storage/v1/b/{bucket}/o\"\n\t//       },\n\t//       \"simple\": {\n\t//         \"multipart\": true,\n\t//         \"path\": \"/upload/storage/v1/b/{bucket}/o\"\n\t//       }\n\t//     }\n\t//   },\n\t//   \"parameterOrder\": [\n\t//     \"bucket\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of the bucket in which to store the new object. 
Overrides the provided object metadata's bucket value, if any.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"contentEncoding\": {\n\t//       \"description\": \"If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifGenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current generation matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifGenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current generation does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current metageneration matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current metageneration does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"name\": {\n\t//       \"description\": \"Name of the object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any.\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"predefinedAcl\": {\n\t//       \"description\": \"Apply a predefined set of access controls to this object.\",\n\t//       \"enum\": [\n\t//         \"authenticatedRead\",\n\t//         \"bucketOwnerFullControl\",\n\t//         \"bucketOwnerRead\",\n\t//         \"private\",\n\t//         \"projectPrivate\",\n\t//         \"publicRead\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get READER access.\",\n\t//         \"Object owner gets OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n\t//         \"Object owner gets OWNER access, and allUsers get READER access.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projection\": {\n\t//       \"description\": \"Set of properties to return. 
Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.\",\n\t//       \"enum\": [\n\t//         \"full\",\n\t//         \"noAcl\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Include all properties.\",\n\t//         \"Omit the acl property.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/o\",\n\t//   \"request\": {\n\t//     \"$ref\": \"Object\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Object\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_write\"\n\t//   ],\n\t//   \"supportsMediaDownload\": true,\n\t//   \"supportsMediaUpload\": true\n\t// }\n\n}\n\n// method id \"storage.objects.list\":\n\ntype ObjectsListCall struct {\n\ts      *Service\n\tbucket string\n\topt_   map[string]interface{}\n}\n\n// List: Retrieves a list of objects matching the criteria.\nfunc (r *ObjectsService) List(bucket string) *ObjectsListCall {\n\tc := &ObjectsListCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\treturn c\n}\n\n// Delimiter sets the optional parameter \"delimiter\": Returns results in\n// a directory-like mode. items will contain only objects whose names,\n// aside from the prefix, do not contain delimiter. Objects whose names,\n// aside from the prefix, contain delimiter will have their name,\n// truncated after the delimiter, returned in prefixes. Duplicate\n// prefixes are omitted.\nfunc (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall {\n\tc.opt_[\"delimiter\"] = delimiter\n\treturn c\n}\n\n// MaxResults sets the optional parameter \"maxResults\": Maximum number\n// of items plus prefixes to return. 
As duplicate prefixes are omitted,\n// fewer total results may be returned than requested. The default value\n// of this parameter is 1,000 items.\nfunc (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall {\n\tc.opt_[\"maxResults\"] = maxResults\n\treturn c\n}\n\n// PageToken sets the optional parameter \"pageToken\": A\n// previously-returned page token representing part of the larger set of\n// results to view.\nfunc (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall {\n\tc.opt_[\"pageToken\"] = pageToken\n\treturn c\n}\n\n// Prefix sets the optional parameter \"prefix\": Filter results to\n// objects whose names begin with this prefix.\nfunc (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall {\n\tc.opt_[\"prefix\"] = prefix\n\treturn c\n}\n\n// Projection sets the optional parameter \"projection\": Set of\n// properties to return. Defaults to noAcl.\n//\n// Possible values:\n//   \"full\" - Include all properties.\n//   \"noAcl\" - Omit the acl property.\nfunc (c *ObjectsListCall) Projection(projection string) *ObjectsListCall {\n\tc.opt_[\"projection\"] = projection\n\treturn c\n}\n\n// Versions sets the optional parameter \"versions\": If true, lists all\n// versions of an object as distinct results. The default is false. 
For\n// more information, see Object Versioning.\nfunc (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall {\n\tc.opt_[\"versions\"] = versions\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ObjectsListCall) Do() (*Objects, error) {\n\tvar body io.Reader = nil\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"delimiter\"]; ok {\n\t\tparams.Set(\"delimiter\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"maxResults\"]; ok {\n\t\tparams.Set(\"maxResults\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"pageToken\"]; ok {\n\t\tparams.Set(\"pageToken\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"prefix\"]; ok {\n\t\tparams.Set(\"prefix\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"projection\"]; ok {\n\t\tparams.Set(\"projection\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"versions\"]; ok {\n\t\tparams.Set(\"versions\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/o\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"GET\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t})\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Objects\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Retrieves a list of objects 
matching the criteria.\",\n\t//   \"httpMethod\": \"GET\",\n\t//   \"id\": \"storage.objects.list\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of the bucket in which to look for objects.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"delimiter\": {\n\t//       \"description\": \"Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"maxResults\": {\n\t//       \"description\": \"Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.\",\n\t//       \"format\": \"uint32\",\n\t//       \"location\": \"query\",\n\t//       \"minimum\": \"0\",\n\t//       \"type\": \"integer\"\n\t//     },\n\t//     \"pageToken\": {\n\t//       \"description\": \"A previously-returned page token representing part of the larger set of results to view.\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"prefix\": {\n\t//       \"description\": \"Filter results to objects whose names begin with this prefix.\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projection\": {\n\t//       \"description\": \"Set of properties to return. 
Defaults to noAcl.\",\n\t//       \"enum\": [\n\t//         \"full\",\n\t//         \"noAcl\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Include all properties.\",\n\t//         \"Omit the acl property.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"versions\": {\n\t//       \"description\": \"If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"boolean\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/o\",\n\t//   \"response\": {\n\t//     \"$ref\": \"Objects\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_only\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_write\"\n\t//   ],\n\t//   \"supportsSubscription\": true\n\t// }\n\n}\n\n// method id \"storage.objects.patch\":\n\ntype ObjectsPatchCall struct {\n\ts       *Service\n\tbucket  string\n\tobject  string\n\tobject2 *Object\n\topt_    map[string]interface{}\n}\n\n// Patch: Updates an object's metadata. 
This method supports patch\n// semantics.\nfunc (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall {\n\tc := &ObjectsPatchCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.object = object\n\tc.object2 = object2\n\treturn c\n}\n\n// Generation sets the optional parameter \"generation\": If present,\n// selects a specific revision of this object (as opposed to the latest\n// version, the default).\nfunc (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall {\n\tc.opt_[\"generation\"] = generation\n\treturn c\n}\n\n// IfGenerationMatch sets the optional parameter \"ifGenerationMatch\":\n// Makes the operation conditional on whether the object's current\n// generation matches the given value.\nfunc (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall {\n\tc.opt_[\"ifGenerationMatch\"] = ifGenerationMatch\n\treturn c\n}\n\n// IfGenerationNotMatch sets the optional parameter\n// \"ifGenerationNotMatch\": Makes the operation conditional on whether\n// the object's current generation does not match the given value.\nfunc (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall {\n\tc.opt_[\"ifGenerationNotMatch\"] = ifGenerationNotMatch\n\treturn c\n}\n\n// IfMetagenerationMatch sets the optional parameter\n// \"ifMetagenerationMatch\": Makes the operation conditional on whether\n// the object's current metageneration matches the given value.\nfunc (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall {\n\tc.opt_[\"ifMetagenerationMatch\"] = ifMetagenerationMatch\n\treturn c\n}\n\n// IfMetagenerationNotMatch sets the optional parameter\n// \"ifMetagenerationNotMatch\": Makes the operation conditional on\n// whether the object's current metageneration does not match the given\n// value.\nfunc (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall 
{\n\tc.opt_[\"ifMetagenerationNotMatch\"] = ifMetagenerationNotMatch\n\treturn c\n}\n\n// PredefinedAcl sets the optional parameter \"predefinedAcl\": Apply a\n// predefined set of access controls to this object.\n//\n// Possible values:\n//   \"authenticatedRead\" - Object owner gets OWNER access, and\n// allAuthenticatedUsers get READER access.\n//   \"bucketOwnerFullControl\" - Object owner gets OWNER access, and\n// project team owners get OWNER access.\n//   \"bucketOwnerRead\" - Object owner gets OWNER access, and project\n// team owners get READER access.\n//   \"private\" - Object owner gets OWNER access.\n//   \"projectPrivate\" - Object owner gets OWNER access, and project team\n// members get access according to their roles.\n//   \"publicRead\" - Object owner gets OWNER access, and allUsers get\n// READER access.\nfunc (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall {\n\tc.opt_[\"predefinedAcl\"] = predefinedAcl\n\treturn c\n}\n\n// Projection sets the optional parameter \"projection\": Set of\n// properties to return. 
Defaults to full.\n//\n// Possible values:\n//   \"full\" - Include all properties.\n//   \"noAcl\" - Omit the acl property.\nfunc (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall {\n\tc.opt_[\"projection\"] = projection\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ObjectsPatchCall) Do() (*Object, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.object2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"generation\"]; ok {\n\t\tparams.Set(\"generation\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifGenerationMatch\"]; ok {\n\t\tparams.Set(\"ifGenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifGenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifGenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"predefinedAcl\"]; ok {\n\t\tparams.Set(\"predefinedAcl\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"projection\"]; ok {\n\t\tparams.Set(\"projection\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/o/{object}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"PATCH\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": 
c.bucket,\n\t\t\"object\": c.object,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Object\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Updates an object's metadata. This method supports patch semantics.\",\n\t//   \"httpMethod\": \"PATCH\",\n\t//   \"id\": \"storage.objects.patch\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\",\n\t//     \"object\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of the bucket in which the object resides.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"generation\": {\n\t//       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifGenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current generation matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifGenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current generation does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current metageneration 
matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current metageneration does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"object\": {\n\t//       \"description\": \"Name of the object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"predefinedAcl\": {\n\t//       \"description\": \"Apply a predefined set of access controls to this object.\",\n\t//       \"enum\": [\n\t//         \"authenticatedRead\",\n\t//         \"bucketOwnerFullControl\",\n\t//         \"bucketOwnerRead\",\n\t//         \"private\",\n\t//         \"projectPrivate\",\n\t//         \"publicRead\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get READER access.\",\n\t//         \"Object owner gets OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n\t//         \"Object owner gets OWNER access, and allUsers get READER access.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projection\": {\n\t//       \"description\": \"Set of properties to return. 
Defaults to full.\",\n\t//       \"enum\": [\n\t//         \"full\",\n\t//         \"noAcl\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Include all properties.\",\n\t//         \"Omit the acl property.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/o/{object}\",\n\t//   \"request\": {\n\t//     \"$ref\": \"Object\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Object\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_write\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.objects.rewrite\":\n\ntype ObjectsRewriteCall struct {\n\ts                 *Service\n\tsourceBucket      string\n\tsourceObject      string\n\tdestinationBucket string\n\tdestinationObject string\n\tobject            *Object\n\topt_              map[string]interface{}\n}\n\n// Rewrite: Rewrites a source object to a destination object. 
Optionally\n// overrides metadata.\nfunc (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall {\n\tc := &ObjectsRewriteCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.sourceBucket = sourceBucket\n\tc.sourceObject = sourceObject\n\tc.destinationBucket = destinationBucket\n\tc.destinationObject = destinationObject\n\tc.object = object\n\treturn c\n}\n\n// DestinationPredefinedAcl sets the optional parameter\n// \"destinationPredefinedAcl\": Apply a predefined set of access controls\n// to the destination object.\n//\n// Possible values:\n//   \"authenticatedRead\" - Object owner gets OWNER access, and\n// allAuthenticatedUsers get READER access.\n//   \"bucketOwnerFullControl\" - Object owner gets OWNER access, and\n// project team owners get OWNER access.\n//   \"bucketOwnerRead\" - Object owner gets OWNER access, and project\n// team owners get READER access.\n//   \"private\" - Object owner gets OWNER access.\n//   \"projectPrivate\" - Object owner gets OWNER access, and project team\n// members get access according to their roles.\n//   \"publicRead\" - Object owner gets OWNER access, and allUsers get\n// READER access.\nfunc (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall {\n\tc.opt_[\"destinationPredefinedAcl\"] = destinationPredefinedAcl\n\treturn c\n}\n\n// IfGenerationMatch sets the optional parameter \"ifGenerationMatch\":\n// Makes the operation conditional on whether the destination object's\n// current generation matches the given value.\nfunc (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall {\n\tc.opt_[\"ifGenerationMatch\"] = ifGenerationMatch\n\treturn c\n}\n\n// IfGenerationNotMatch sets the optional parameter\n// \"ifGenerationNotMatch\": Makes the operation conditional on whether\n// the destination object's current generation does not match the 
given\n// value.\nfunc (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall {\n\tc.opt_[\"ifGenerationNotMatch\"] = ifGenerationNotMatch\n\treturn c\n}\n\n// IfMetagenerationMatch sets the optional parameter\n// \"ifMetagenerationMatch\": Makes the operation conditional on whether\n// the destination object's current metageneration matches the given\n// value.\nfunc (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall {\n\tc.opt_[\"ifMetagenerationMatch\"] = ifMetagenerationMatch\n\treturn c\n}\n\n// IfMetagenerationNotMatch sets the optional parameter\n// \"ifMetagenerationNotMatch\": Makes the operation conditional on\n// whether the destination object's current metageneration does not\n// match the given value.\nfunc (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall {\n\tc.opt_[\"ifMetagenerationNotMatch\"] = ifMetagenerationNotMatch\n\treturn c\n}\n\n// IfSourceGenerationMatch sets the optional parameter\n// \"ifSourceGenerationMatch\": Makes the operation conditional on whether\n// the source object's generation matches the given value.\nfunc (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall {\n\tc.opt_[\"ifSourceGenerationMatch\"] = ifSourceGenerationMatch\n\treturn c\n}\n\n// IfSourceGenerationNotMatch sets the optional parameter\n// \"ifSourceGenerationNotMatch\": Makes the operation conditional on\n// whether the source object's generation does not match the given\n// value.\nfunc (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall {\n\tc.opt_[\"ifSourceGenerationNotMatch\"] = ifSourceGenerationNotMatch\n\treturn c\n}\n\n// IfSourceMetagenerationMatch sets the optional parameter\n// \"ifSourceMetagenerationMatch\": Makes the operation conditional on\n// whether the source object's current metageneration matches the given\n// 
value.\nfunc (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall {\n\tc.opt_[\"ifSourceMetagenerationMatch\"] = ifSourceMetagenerationMatch\n\treturn c\n}\n\n// IfSourceMetagenerationNotMatch sets the optional parameter\n// \"ifSourceMetagenerationNotMatch\": Makes the operation conditional on\n// whether the source object's current metageneration does not match the\n// given value.\nfunc (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall {\n\tc.opt_[\"ifSourceMetagenerationNotMatch\"] = ifSourceMetagenerationNotMatch\n\treturn c\n}\n\n// MaxBytesRewrittenPerCall sets the optional parameter\n// \"maxBytesRewrittenPerCall\": The maximum number of bytes that will be\n// rewritten per rewrite request. Most callers shouldn't need to specify\n// this parameter - it is primarily in place to support testing. If\n// specified the value must be an integral multiple of 1 MiB (1048576).\n// Also, this only applies to requests where the source and destination\n// span locations and/or storage classes. Finally, this value must not\n// change across rewrite calls else you'll get an error that the\n// rewriteToken is invalid.\nfunc (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall {\n\tc.opt_[\"maxBytesRewrittenPerCall\"] = maxBytesRewrittenPerCall\n\treturn c\n}\n\n// Projection sets the optional parameter \"projection\": Set of\n// properties to return. 
Defaults to noAcl, unless the object resource\n// specifies the acl property, when it defaults to full.\n//\n// Possible values:\n//   \"full\" - Include all properties.\n//   \"noAcl\" - Omit the acl property.\nfunc (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall {\n\tc.opt_[\"projection\"] = projection\n\treturn c\n}\n\n// RewriteToken sets the optional parameter \"rewriteToken\": Include this\n// field (from the previous rewrite response) on each rewrite request\n// after the first one, until the rewrite response 'done' flag is true.\n// Calls that provide a rewriteToken can omit all other request fields,\n// but if included those fields must match the values provided in the\n// first rewrite request.\nfunc (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall {\n\tc.opt_[\"rewriteToken\"] = rewriteToken\n\treturn c\n}\n\n// SourceGeneration sets the optional parameter \"sourceGeneration\": If\n// present, selects a specific revision of the source object (as opposed\n// to the latest version, the default).\nfunc (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall {\n\tc.opt_[\"sourceGeneration\"] = sourceGeneration\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ObjectsRewriteCall) Do() (*RewriteResponse, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.object)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"destinationPredefinedAcl\"]; ok {\n\t\tparams.Set(\"destinationPredefinedAcl\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := 
c.opt_[\"ifGenerationMatch\"]; ok {\n\t\tparams.Set(\"ifGenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifGenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifGenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifSourceGenerationMatch\"]; ok {\n\t\tparams.Set(\"ifSourceGenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifSourceGenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifSourceGenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifSourceMetagenerationMatch\"]; ok {\n\t\tparams.Set(\"ifSourceMetagenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifSourceMetagenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifSourceMetagenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"maxBytesRewrittenPerCall\"]; ok {\n\t\tparams.Set(\"maxBytesRewrittenPerCall\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"projection\"]; ok {\n\t\tparams.Set(\"projection\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"rewriteToken\"]; ok {\n\t\tparams.Set(\"rewriteToken\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"sourceGeneration\"]; ok {\n\t\tparams.Set(\"sourceGeneration\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"sourceBucket\":      c.sourceBucket,\n\t\t\"sourceObject\":      c.sourceObject,\n\t\t\"destinationBucket\": 
c.destinationBucket,\n\t\t\"destinationObject\": c.destinationObject,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *RewriteResponse\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Rewrites a source object to a destination object. Optionally overrides metadata.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"storage.objects.rewrite\",\n\t//   \"parameterOrder\": [\n\t//     \"sourceBucket\",\n\t//     \"sourceObject\",\n\t//     \"destinationBucket\",\n\t//     \"destinationObject\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"destinationBucket\": {\n\t//       \"description\": \"Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"destinationObject\": {\n\t//       \"description\": \"Name of the new object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"destinationPredefinedAcl\": {\n\t//       \"description\": \"Apply a predefined set of access controls to the destination object.\",\n\t//       \"enum\": [\n\t//         \"authenticatedRead\",\n\t//         \"bucketOwnerFullControl\",\n\t//         \"bucketOwnerRead\",\n\t//         \"private\",\n\t//         \"projectPrivate\",\n\t//         \"publicRead\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get READER access.\",\n\t//         \"Object owner gets OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n\t//         \"Object owner gets OWNER access, and allUsers get READER access.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifGenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the destination object's current generation matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifGenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the destination object's current generation does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the destination object's current metageneration matches 
the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the destination object's current metageneration does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifSourceGenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the source object's generation matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifSourceGenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the source object's generation does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifSourceMetagenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the source object's current metageneration matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifSourceMetagenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the source object's current metageneration does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"maxBytesRewrittenPerCall\": {\n\t//       \"description\": \"The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). 
Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projection\": {\n\t//       \"description\": \"Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.\",\n\t//       \"enum\": [\n\t//         \"full\",\n\t//         \"noAcl\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Include all properties.\",\n\t//         \"Omit the acl property.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"rewriteToken\": {\n\t//       \"description\": \"Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"sourceBucket\": {\n\t//       \"description\": \"Name of the bucket in which to find the source object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"sourceGeneration\": {\n\t//       \"description\": \"If present, selects a specific revision of the source object (as opposed to the latest version, the default).\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"sourceObject\": {\n\t//       \"description\": \"Name of the source object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}\",\n\t//   \"request\": {\n\t//     \"$ref\": \"Object\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"RewriteResponse\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_write\"\n\t//   ]\n\t// }\n\n}\n\n// method id \"storage.objects.update\":\n\ntype ObjectsUpdateCall struct {\n\ts       *Service\n\tbucket  string\n\tobject  string\n\tobject2 *Object\n\topt_    map[string]interface{}\n}\n\n// Update: Updates an object's metadata.\nfunc (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall {\n\tc := &ObjectsUpdateCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.object = object\n\tc.object2 = object2\n\treturn c\n}\n\n// Generation sets the 
optional parameter \"generation\": If present,\n// selects a specific revision of this object (as opposed to the latest\n// version, the default).\nfunc (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall {\n\tc.opt_[\"generation\"] = generation\n\treturn c\n}\n\n// IfGenerationMatch sets the optional parameter \"ifGenerationMatch\":\n// Makes the operation conditional on whether the object's current\n// generation matches the given value.\nfunc (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall {\n\tc.opt_[\"ifGenerationMatch\"] = ifGenerationMatch\n\treturn c\n}\n\n// IfGenerationNotMatch sets the optional parameter\n// \"ifGenerationNotMatch\": Makes the operation conditional on whether\n// the object's current generation does not match the given value.\nfunc (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall {\n\tc.opt_[\"ifGenerationNotMatch\"] = ifGenerationNotMatch\n\treturn c\n}\n\n// IfMetagenerationMatch sets the optional parameter\n// \"ifMetagenerationMatch\": Makes the operation conditional on whether\n// the object's current metageneration matches the given value.\nfunc (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall {\n\tc.opt_[\"ifMetagenerationMatch\"] = ifMetagenerationMatch\n\treturn c\n}\n\n// IfMetagenerationNotMatch sets the optional parameter\n// \"ifMetagenerationNotMatch\": Makes the operation conditional on\n// whether the object's current metageneration does not match the given\n// value.\nfunc (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall {\n\tc.opt_[\"ifMetagenerationNotMatch\"] = ifMetagenerationNotMatch\n\treturn c\n}\n\n// PredefinedAcl sets the optional parameter \"predefinedAcl\": Apply a\n// predefined set of access controls to this object.\n//\n// Possible values:\n//   \"authenticatedRead\" - Object owner gets OWNER access, and\n// 
allAuthenticatedUsers get READER access.\n//   \"bucketOwnerFullControl\" - Object owner gets OWNER access, and\n// project team owners get OWNER access.\n//   \"bucketOwnerRead\" - Object owner gets OWNER access, and project\n// team owners get READER access.\n//   \"private\" - Object owner gets OWNER access.\n//   \"projectPrivate\" - Object owner gets OWNER access, and project team\n// members get access according to their roles.\n//   \"publicRead\" - Object owner gets OWNER access, and allUsers get\n// READER access.\nfunc (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall {\n\tc.opt_[\"predefinedAcl\"] = predefinedAcl\n\treturn c\n}\n\n// Projection sets the optional parameter \"projection\": Set of\n// properties to return. Defaults to full.\n//\n// Possible values:\n//   \"full\" - Include all properties.\n//   \"noAcl\" - Omit the acl property.\nfunc (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall {\n\tc.opt_[\"projection\"] = projection\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ObjectsUpdateCall) Do() (*Object, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.object2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"generation\"]; ok {\n\t\tparams.Set(\"generation\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifGenerationMatch\"]; ok {\n\t\tparams.Set(\"ifGenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifGenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifGenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := 
c.opt_[\"ifMetagenerationMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"ifMetagenerationNotMatch\"]; ok {\n\t\tparams.Set(\"ifMetagenerationNotMatch\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"predefinedAcl\"]; ok {\n\t\tparams.Set(\"predefinedAcl\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"projection\"]; ok {\n\t\tparams.Set(\"projection\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/o/{object}\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"PUT\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t\t\"object\": c.object,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret *Object\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Updates an object's metadata.\",\n\t//   \"httpMethod\": \"PUT\",\n\t//   \"id\": \"storage.objects.update\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\",\n\t//     \"object\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of the bucket in which the object resides.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"generation\": {\n\t//       \"description\": \"If present, selects a specific revision of this object (as opposed to the latest version, the default).\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t// 
    },\n\t//     \"ifGenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current generation matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifGenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current generation does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current metageneration matches the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"ifMetagenerationNotMatch\": {\n\t//       \"description\": \"Makes the operation conditional on whether the object's current metageneration does not match the given value.\",\n\t//       \"format\": \"int64\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"object\": {\n\t//       \"description\": \"Name of the object.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"predefinedAcl\": {\n\t//       \"description\": \"Apply a predefined set of access controls to this object.\",\n\t//       \"enum\": [\n\t//         \"authenticatedRead\",\n\t//         \"bucketOwnerFullControl\",\n\t//         \"bucketOwnerRead\",\n\t//         \"private\",\n\t//         \"projectPrivate\",\n\t//         \"publicRead\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Object owner gets OWNER access, and allAuthenticatedUsers get READER access.\",\n\t//         \"Object owner gets OWNER access, and project team owners get OWNER access.\",\n\t//         \"Object 
owner gets OWNER access, and project team owners get READER access.\",\n\t//         \"Object owner gets OWNER access.\",\n\t//         \"Object owner gets OWNER access, and project team members get access according to their roles.\",\n\t//         \"Object owner gets OWNER access, and allUsers get READER access.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projection\": {\n\t//       \"description\": \"Set of properties to return. Defaults to full.\",\n\t//       \"enum\": [\n\t//         \"full\",\n\t//         \"noAcl\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Include all properties.\",\n\t//         \"Omit the acl property.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/o/{object}\",\n\t//   \"request\": {\n\t//     \"$ref\": \"Object\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Object\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_write\"\n\t//   ],\n\t//   \"supportsMediaDownload\": true\n\t// }\n\n}\n\n// method id \"storage.objects.watchAll\":\n\ntype ObjectsWatchAllCall struct {\n\ts       *Service\n\tbucket  string\n\tchannel *Channel\n\topt_    map[string]interface{}\n}\n\n// WatchAll: Watch for changes on all objects in a bucket.\nfunc (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall {\n\tc := &ObjectsWatchAllCall{s: r.s, opt_: make(map[string]interface{})}\n\tc.bucket = bucket\n\tc.channel = channel\n\treturn c\n}\n\n// Delimiter sets the optional parameter \"delimiter\": Returns results in\n// a directory-like mode. items will contain only objects whose names,\n// aside from the prefix, do not contain delimiter. 
Objects whose names,\n// aside from the prefix, contain delimiter will have their name,\n// truncated after the delimiter, returned in prefixes. Duplicate\n// prefixes are omitted.\nfunc (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall {\n\tc.opt_[\"delimiter\"] = delimiter\n\treturn c\n}\n\n// MaxResults sets the optional parameter \"maxResults\": Maximum number\n// of items plus prefixes to return. As duplicate prefixes are omitted,\n// fewer total results may be returned than requested. The default value\n// of this parameter is 1,000 items.\nfunc (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall {\n\tc.opt_[\"maxResults\"] = maxResults\n\treturn c\n}\n\n// PageToken sets the optional parameter \"pageToken\": A\n// previously-returned page token representing part of the larger set of\n// results to view.\nfunc (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall {\n\tc.opt_[\"pageToken\"] = pageToken\n\treturn c\n}\n\n// Prefix sets the optional parameter \"prefix\": Filter results to\n// objects whose names begin with this prefix.\nfunc (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall {\n\tc.opt_[\"prefix\"] = prefix\n\treturn c\n}\n\n// Projection sets the optional parameter \"projection\": Set of\n// properties to return. Defaults to noAcl.\n//\n// Possible values:\n//   \"full\" - Include all properties.\n//   \"noAcl\" - Omit the acl property.\nfunc (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall {\n\tc.opt_[\"projection\"] = projection\n\treturn c\n}\n\n// Versions sets the optional parameter \"versions\": If true, lists all\n// versions of an object as distinct results. The default is false. 
For\n// more information, see Object Versioning.\nfunc (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall {\n\tc.opt_[\"versions\"] = versions\n\treturn c\n}\n\n// Fields allows partial responses to be retrieved.\n// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse\n// for more information.\nfunc (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall {\n\tc.opt_[\"fields\"] = googleapi.CombineFields(s)\n\treturn c\n}\n\nfunc (c *ObjectsWatchAllCall) Do() (*Channel, error) {\n\tvar body io.Reader = nil\n\tbody, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctype := \"application/json\"\n\tparams := make(url.Values)\n\tparams.Set(\"alt\", \"json\")\n\tif v, ok := c.opt_[\"delimiter\"]; ok {\n\t\tparams.Set(\"delimiter\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"maxResults\"]; ok {\n\t\tparams.Set(\"maxResults\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"pageToken\"]; ok {\n\t\tparams.Set(\"pageToken\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"prefix\"]; ok {\n\t\tparams.Set(\"prefix\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"projection\"]; ok {\n\t\tparams.Set(\"projection\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"versions\"]; ok {\n\t\tparams.Set(\"versions\", fmt.Sprintf(\"%v\", v))\n\t}\n\tif v, ok := c.opt_[\"fields\"]; ok {\n\t\tparams.Set(\"fields\", fmt.Sprintf(\"%v\", v))\n\t}\n\turls := googleapi.ResolveRelative(c.s.BasePath, \"b/{bucket}/o/watch\")\n\turls += \"?\" + params.Encode()\n\treq, _ := http.NewRequest(\"POST\", urls, body)\n\tgoogleapi.Expand(req.URL, map[string]string{\n\t\t\"bucket\": c.bucket,\n\t})\n\treq.Header.Set(\"Content-Type\", ctype)\n\treq.Header.Set(\"User-Agent\", c.s.userAgent())\n\tres, err := c.s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer googleapi.CloseBody(res)\n\tif err := googleapi.CheckResponse(res); err != nil 
{\n\t\treturn nil, err\n\t}\n\tvar ret *Channel\n\tif err := json.NewDecoder(res.Body).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n\t// {\n\t//   \"description\": \"Watch for changes on all objects in a bucket.\",\n\t//   \"httpMethod\": \"POST\",\n\t//   \"id\": \"storage.objects.watchAll\",\n\t//   \"parameterOrder\": [\n\t//     \"bucket\"\n\t//   ],\n\t//   \"parameters\": {\n\t//     \"bucket\": {\n\t//       \"description\": \"Name of the bucket in which to look for objects.\",\n\t//       \"location\": \"path\",\n\t//       \"required\": true,\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"delimiter\": {\n\t//       \"description\": \"Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"maxResults\": {\n\t//       \"description\": \"Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. 
The default value of this parameter is 1,000 items.\",\n\t//       \"format\": \"uint32\",\n\t//       \"location\": \"query\",\n\t//       \"minimum\": \"0\",\n\t//       \"type\": \"integer\"\n\t//     },\n\t//     \"pageToken\": {\n\t//       \"description\": \"A previously-returned page token representing part of the larger set of results to view.\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"prefix\": {\n\t//       \"description\": \"Filter results to objects whose names begin with this prefix.\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"projection\": {\n\t//       \"description\": \"Set of properties to return. Defaults to noAcl.\",\n\t//       \"enum\": [\n\t//         \"full\",\n\t//         \"noAcl\"\n\t//       ],\n\t//       \"enumDescriptions\": [\n\t//         \"Include all properties.\",\n\t//         \"Omit the acl property.\"\n\t//       ],\n\t//       \"location\": \"query\",\n\t//       \"type\": \"string\"\n\t//     },\n\t//     \"versions\": {\n\t//       \"description\": \"If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.\",\n\t//       \"location\": \"query\",\n\t//       \"type\": \"boolean\"\n\t//     }\n\t//   },\n\t//   \"path\": \"b/{bucket}/o/watch\",\n\t//   \"request\": {\n\t//     \"$ref\": \"Channel\",\n\t//     \"parameterName\": \"resource\"\n\t//   },\n\t//   \"response\": {\n\t//     \"$ref\": \"Channel\"\n\t//   },\n\t//   \"scopes\": [\n\t//     \"https://www.googleapis.com/auth/cloud-platform\",\n\t//     \"https://www.googleapis.com/auth/devstorage.full_control\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_only\",\n\t//     \"https://www.googleapis.com/auth/devstorage.read_write\"\n\t//   ],\n\t//   \"supportsSubscription\": true\n\t// }\n\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/.travis.yml",
    "content": "language: go\nsudo: false\n\ngo:\n  - 1.4\n\ninstall:\n  - go get -v -t -d google.golang.org/appengine/...\n  - mkdir sdk\n  - curl -o sdk.zip \"https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.24.zip\"\n  - unzip sdk.zip -d sdk\n  - export APPENGINE_DEV_APPSERVER=$(pwd)/sdk/go_appengine/dev_appserver.py\n\nscript:\n  - go version\n  - go test -v google.golang.org/appengine/...\n  - go test -v -race google.golang.org/appengine/...\n  - sdk/go_appengine/goapp test -v google.golang.org/appengine/...\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/LICENSE",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/README.md",
    "content": "# Go App Engine packages\n\n[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine)\n\nThis repository supports the Go runtime on App Engine,\nincluding both classic App Engine and Managed VMs.\nIt provides APIs for interacting with App Engine services.\nIts canonical import path is `google.golang.org/appengine`.\n\nSee https://cloud.google.com/appengine/docs/go/\nfor more information.\n\nFile issue reports and feature requests on the [Google App Engine issue\ntracker](https://code.google.com/p/googleappengine/issues/entry?template=Go%20defect).\n\n## Directory structure\nThe top level directory of this repository is the `appengine` package. It\ncontains the\nbasic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API\npackages are in subdirectories (e.g. `datastore`).\n\nThere is an `internal` subdirectory that contains service protocol buffers,\nplus packages required for connectivity to make API calls. App Engine apps\nshould not directly import any package under `internal`.\n\n## Updating a Go App Engine app\n\nThis section describes how to update a traditional Go App Engine app to use\nthese packages.\n\n### 1. Update YAML files (Managed VMs only)\n\nThe `app.yaml` file (and YAML files for modules) should have these new lines added:\n```\nvm: true\nmanual_scaling:\n  instances: 1\n```\nSee https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details.\n\n### 2. 
Update import paths\n\nThe import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.\nYou will need to update your code to use import paths starting with that; for instance,\ncode importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.\nYou can do that manually, or by running this command to recursively update all Go source files in the current directory:\n(may require GNU sed)\n```\nsed -i '/\"appengine/{s,\"appengine,\"google.golang.org/appengine,;s,appengine_,appengine/,}' \\\n  $(find . -name '*.go')\n```\n\n### 3. Update code using deprecated, removed or modified APIs\n\nMost App Engine services are available with exactly the same API.\nA few APIs were cleaned up, and some are not available yet.\nThis list summarises the differences:\n\n* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.\n* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`.\n* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.\n* `appengine.Datacenter` now takes a `context.Context` argument.\n* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.\n* `delay.Call` now returns an error.\n* `search.FieldLoadSaver` now handles document metadata.\n* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the\n  `context.Context` instead.\n* `aetest` no longer declares its own Context type, and uses the standard one instead.\n* `taskqueue.QueueStats` no longer takes a maxTasks argument. 
That argument has been\n  deprecated and unused for a long time.\n* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.\n  Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.\n* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.\n  Use [Google Cloud Storage](https://godoc.org/google.golang.org/cloud/storage) instead.\n* `appengine/socket` is not required on Managed VMs. Use the standard `net` package instead.\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/aetest/doc.go",
    "content": "/*\nPackage aetest provides an API for running dev_appserver for use in tests.\n\nAn example test file:\n\n\tpackage foo_test\n\n\timport (\n\t\t\"testing\"\n\n\t\t\"google.golang.org/appengine/memcache\"\n\t\t\"google.golang.org/appengine/aetest\"\n\t)\n\n\tfunc TestFoo(t *testing.T) {\n\t\tctx, done, err := aetest.NewContext()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer done()\n\n\t\tit := &memcache.Item{\n\t\t\tKey:   \"some-key\",\n\t\t\tValue: []byte(\"some-value\"),\n\t\t}\n\t\terr = memcache.Set(ctx, it)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Set err: %v\", err)\n\t\t}\n\t\tit, err = memcache.Get(ctx, \"some-key\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Get err: %v; want no error\", err)\n\t\t}\n\t\tif g, w := string(it.Value), \"some-value\" ; g != w {\n\t\t\tt.Errorf(\"retrieved Item.Value = %q, want %q\", g, w)\n\t\t}\n\t}\n\nThe environment variable APPENGINE_DEV_APPSERVER specifies the location of the\ndev_appserver.py executable to use. If unset, the system PATH is consulted.\n*/\npackage aetest\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/aetest/instance.go",
    "content": "package aetest\n\nimport (\n\t\"io\"\n\t\"net/http\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/appengine\"\n)\n\n// Instance represents a running instance of the development API Server.\ntype Instance interface {\n\t// Close kills the child api_server.py process, releasing its resources.\n\tio.Closer\n\t// NewRequest returns an *http.Request associated with this instance.\n\tNewRequest(method, urlStr string, body io.Reader) (*http.Request, error)\n}\n\n// Options is used to specify options when creating an Instance.\ntype Options struct {\n\t// AppID specifies the App ID to use during tests.\n\t// By default, \"testapp\".\n\tAppID string\n\t// StronglyConsistentDatastore is whether the local datastore should be\n\t// strongly consistent. This will diverge from production behaviour.\n\tStronglyConsistentDatastore bool\n}\n\n// NewContext starts an instance of the development API server, and returns\n// a context that will route all API calls to that server, as well as a\n// closure that must be called when the Context is no longer required.\nfunc NewContext() (context.Context, func(), error) {\n\tinst, err := NewInstance(nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq, err := inst.NewRequest(\"GET\", \"/\", nil)\n\tif err != nil {\n\t\tinst.Close()\n\t\treturn nil, nil, err\n\t}\n\tctx := appengine.NewContext(req)\n\treturn ctx, func() {\n\t\tinst.Close()\n\t}, nil\n}\n\n// PrepareDevAppserver is a hook which, if set, will be called before the\n// dev_appserver.py is started, each time it is started. If aetest.NewContext\n// is invoked from the goapp test tool, this hook is unnecessary.\nvar PrepareDevAppserver func() error\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/aetest/instance_classic.go",
    "content": "// +build appengine\n\npackage aetest\n\nimport \"appengine/aetest\"\n\n// NewInstance launches a running instance of api_server.py which can be used\n// for multiple test Contexts that delegate all App Engine API calls to that\n// instance.\n// If opts is nil the default values are used.\nfunc NewInstance(opts *Options) (Instance, error) {\n\taetest.PrepareDevAppserver = PrepareDevAppserver\n\tvar aeOpts *aetest.Options\n\tif opts != nil {\n\t\taeOpts = &aetest.Options{\n\t\t\tAppID: opts.AppID,\n\t\t\tStronglyConsistentDatastore: opts.StronglyConsistentDatastore,\n\t\t}\n\t}\n\treturn aetest.NewInstance(aeOpts)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/aetest/instance_vm.go",
    "content": "// +build !appengine\n\npackage aetest\n\nimport (\n\t\"bufio\"\n\t\"crypto/rand\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/appengine/internal\"\n)\n\n// NewInstance launches a running instance of api_server.py which can be used\n// for multiple test Contexts that delegate all App Engine API calls to that\n// instance.\n// If opts is nil the default values are used.\nfunc NewInstance(opts *Options) (Instance, error) {\n\ti := &instance{\n\t\topts:  opts,\n\t\tappID: \"testapp\",\n\t}\n\tif opts != nil && opts.AppID != \"\" {\n\t\ti.appID = opts.AppID\n\t}\n\tif err := i.startChild(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn i, nil\n}\n\nfunc newSessionID() string {\n\tvar buf [16]byte\n\tio.ReadFull(rand.Reader, buf[:])\n\treturn fmt.Sprintf(\"%x\", buf[:])\n}\n\n// instance implements the Instance interface.\ntype instance struct {\n\topts     *Options\n\tchild    *exec.Cmd\n\tapiURL   *url.URL // base URL of API HTTP server\n\tadminURL string   // base URL of admin HTTP server\n\tappDir   string\n\tappID    string\n\trelFuncs []func() // funcs to release any associated contexts\n}\n\n// NewRequest returns an *http.Request associated with this instance.\nfunc (i *instance) NewRequest(method, urlStr string, body io.Reader) (*http.Request, error) {\n\treq, err := http.NewRequest(method, urlStr, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Associate this request.\n\trelease := internal.RegisterTestRequest(req, i.apiURL, func(ctx context.Context) context.Context {\n\t\tctx = internal.WithAppIDOverride(ctx, \"dev~\"+i.appID)\n\t\treturn ctx\n\t})\n\ti.relFuncs = append(i.relFuncs, release)\n\n\treturn req, nil\n}\n\n// Close kills the child api_server.py process, releasing its resources.\nfunc (i *instance) Close() (err error) {\n\tfor _, rel := range i.relFuncs 
{\n\t\trel()\n\t}\n\ti.relFuncs = nil\n\tif i.child == nil {\n\t\treturn nil\n\t}\n\tdefer func() {\n\t\ti.child = nil\n\t\terr1 := os.RemoveAll(i.appDir)\n\t\tif err == nil {\n\t\t\terr = err1\n\t\t}\n\t}()\n\n\tif p := i.child.Process; p != nil {\n\t\terrc := make(chan error, 1)\n\t\tgo func() {\n\t\t\terrc <- i.child.Wait()\n\t\t}()\n\n\t\t// Call the quit handler on the admin server.\n\t\tres, err := http.Get(i.adminURL + \"/quit\")\n\t\tif err != nil {\n\t\t\tp.Kill()\n\t\t\treturn fmt.Errorf(\"unable to call /quit handler: %v\", err)\n\t\t}\n\t\tres.Body.Close()\n\n\t\tselect {\n\t\tcase <-time.After(15 * time.Second):\n\t\t\tp.Kill()\n\t\t\treturn errors.New(\"timeout killing child process\")\n\t\tcase err = <-errc:\n\t\t\t// Do nothing.\n\t\t}\n\t}\n\treturn\n}\n\nfunc fileExists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil\n}\n\nfunc findPython() (path string, err error) {\n\tfor _, name := range []string{\"python2.7\", \"python\"} {\n\t\tpath, err = exec.LookPath(name)\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc findDevAppserver() (string, error) {\n\tif p := os.Getenv(\"APPENGINE_DEV_APPSERVER\"); p != \"\" {\n\t\tif fileExists(p) {\n\t\t\treturn p, nil\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"invalid APPENGINE_DEV_APPSERVER environment variable; path %q doesn't exist\", p)\n\t}\n\treturn exec.LookPath(\"dev_appserver.py\")\n}\n\nvar apiServerAddrRE = regexp.MustCompile(`Starting API server at: (\\S+)`)\nvar adminServerAddrRE = regexp.MustCompile(`Starting admin server at: (\\S+)`)\n\nfunc (i *instance) startChild() (err error) {\n\tif PrepareDevAppserver != nil {\n\t\tif err := PrepareDevAppserver(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tpython, err := findPython()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not find python interpreter: %v\", err)\n\t}\n\tdevAppserver, err := findDevAppserver()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not find dev_appserver.py: %v\", 
err)\n\t}\n\n\ti.appDir, err = ioutil.TempDir(\"\", \"appengine-aetest\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tos.RemoveAll(i.appDir)\n\t\t}\n\t}()\n\terr = os.Mkdir(filepath.Join(i.appDir, \"app\"), 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(filepath.Join(i.appDir, \"app\", \"app.yaml\"), []byte(i.appYAML()), 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(filepath.Join(i.appDir, \"app\", \"stubapp.go\"), []byte(appSource), 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tappserverArgs := []string{\n\t\tdevAppserver,\n\t\t\"--port=0\",\n\t\t\"--api_port=0\",\n\t\t\"--admin_port=0\",\n\t\t\"--automatic_restart=false\",\n\t\t\"--skip_sdk_update_check=true\",\n\t\t\"--clear_datastore=true\",\n\t\t\"--clear_search_indexes=true\",\n\t\t\"--datastore_path\", filepath.Join(i.appDir, \"datastore\"),\n\t}\n\tif i.opts != nil && i.opts.StronglyConsistentDatastore {\n\t\tappserverArgs = append(appserverArgs, \"--datastore_consistency_policy=consistent\")\n\t}\n\tappserverArgs = append(appserverArgs, filepath.Join(i.appDir, \"app\"))\n\n\ti.child = exec.Command(python,\n\t\tappserverArgs...,\n\t)\n\ti.child.Stdout = os.Stdout\n\tvar stderr io.Reader\n\tstderr, err = i.child.StderrPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tstderr = io.TeeReader(stderr, os.Stderr)\n\tif err = i.child.Start(); err != nil {\n\t\treturn err\n\t}\n\n\t// Read stderr until we have read the URLs of the API server and admin interface.\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\ts := bufio.NewScanner(stderr)\n\t\tfor s.Scan() {\n\t\t\tif match := apiServerAddrRE.FindStringSubmatch(s.Text()); match != nil {\n\t\t\t\tu, err := url.Parse(match[1])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrc <- fmt.Errorf(\"failed to parse API URL %q: %v\", match[1], err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ti.apiURL = u\n\t\t\t}\n\t\t\tif match := adminServerAddrRE.FindStringSubmatch(s.Text()); match != nil 
{\n\t\t\t\ti.adminURL = match[1]\n\t\t\t}\n\t\t\tif i.adminURL != \"\" && i.apiURL != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\terrc <- s.Err()\n\t}()\n\n\tselect {\n\tcase <-time.After(15 * time.Second):\n\t\tif p := i.child.Process; p != nil {\n\t\t\tp.Kill()\n\t\t}\n\t\treturn errors.New(\"timeout starting child process\")\n\tcase err := <-errc:\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error reading child process stderr: %v\", err)\n\t\t}\n\t}\n\tif i.adminURL == \"\" {\n\t\treturn errors.New(\"unable to find admin server URL\")\n\t}\n\tif i.apiURL == nil {\n\t\treturn errors.New(\"unable to find API server URL\")\n\t}\n\treturn nil\n}\n\nfunc (i *instance) appYAML() string {\n\treturn fmt.Sprintf(appYAMLTemplate, i.appID)\n}\n\nconst appYAMLTemplate = `\napplication: %s\nversion: 1\nruntime: go\napi_version: go1\nvm: true\n\nhandlers:\n- url: /.*\n  script: _go_app\n`\n\nconst appSource = `\npackage main\nimport \"google.golang.org/appengine\"\nfunc main() { appengine.Main() }\n`\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/aetest/user.go",
    "content": "package aetest\n\nimport (\n\t\"hash/crc32\"\n\t\"net/http\"\n\t\"strconv\"\n\n\t\"google.golang.org/appengine/user\"\n)\n\n// Login causes the provided Request to act as though issued by the given user.\nfunc Login(u *user.User, req *http.Request) {\n\treq.Header.Set(\"X-AppEngine-User-Email\", u.Email)\n\tid := u.ID\n\tif id == \"\" {\n\t\tid = strconv.Itoa(int(crc32.Checksum([]byte(u.Email), crc32.IEEETable)))\n\t}\n\treq.Header.Set(\"X-AppEngine-User-Id\", id)\n\treq.Header.Set(\"X-AppEngine-User-Federated-Identity\", u.Email)\n\treq.Header.Set(\"X-AppEngine-User-Federated-Provider\", u.FederatedProvider)\n\tif u.Admin {\n\t\treq.Header.Set(\"X-AppEngine-User-Is-Admin\", \"1\")\n\t} else {\n\t\treq.Header.Set(\"X-AppEngine-User-Is-Admin\", \"0\")\n\t}\n}\n\n// Logout causes the provided Request to act as though issued by a logged-out\n// user.\nfunc Logout(req *http.Request) {\n\treq.Header.Del(\"X-AppEngine-User-Email\")\n\treq.Header.Del(\"X-AppEngine-User-Id\")\n\treq.Header.Del(\"X-AppEngine-User-Is-Admin\")\n\treq.Header.Del(\"X-AppEngine-User-Federated-Identity\")\n\treq.Header.Del(\"X-AppEngine-User-Federated-Provider\")\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/appengine.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// Package appengine provides basic functionality for Google App Engine.\n//\n// For more information on how to write Go apps for Google App Engine, see:\n// https://cloud.google.com/appengine/docs/go/\npackage appengine\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n)\n\n// IsDevAppServer reports whether the App Engine app is running in the\n// development App Server.\nfunc IsDevAppServer() bool {\n\treturn internal.IsDevAppServer()\n}\n\n// NewContext returns a context for an in-flight HTTP request.\n// This function is cheap.\nfunc NewContext(req *http.Request) context.Context {\n\treturn WithContext(context.Background(), req)\n}\n\n// WithContext returns a copy of the parent context\n// and associates it with an in-flight HTTP request.\n// This function is cheap.\nfunc WithContext(parent context.Context, req *http.Request) context.Context {\n\treturn internal.WithContext(parent, req)\n}\n\n// TODO(dsymonds): Add a Call function here? 
Otherwise other packages can't access internal.Call.\n\n// BlobKey is a key for a blobstore blob.\n//\n// Conceptually, this type belongs in the blobstore package, but it lives in\n// the appengine package to avoid a circular dependency: blobstore depends on\n// datastore, and datastore needs to refer to the BlobKey type.\ntype BlobKey string\n\n// GeoPoint represents a location as latitude/longitude in degrees.\ntype GeoPoint struct {\n\tLat, Lng float64\n}\n\n// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.\nfunc (g GeoPoint) Valid() bool {\n\treturn -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180\n}\n\n// APICallFunc defines a function type for handling an API call.\n// See WithCallOverride.\ntype APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error\n\n// WithCallOverride returns a copy of the parent context\n// that will cause API calls to invoke f instead of their normal operation.\n//\n// This is intended for advanced users only.\nfunc WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context {\n\treturn internal.WithCallOverride(ctx, internal.CallOverrideFunc(f))\n}\n\n// APICall performs an API call.\n//\n// This is not intended for general use; it is exported for use in conjunction\n// with WithAPICallFunc.\nfunc APICall(ctx context.Context, service, method string, in, out proto.Message) error {\n\treturn internal.Call(ctx, service, method, in, out)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/appengine_vm.go",
    "content": "// Copyright 2015 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// +build !appengine\n\npackage appengine\n\nimport (\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n)\n\n// The comment below must not be changed.\n// It is used by go-app-builder to recognise that this package has\n// the Main function to use in the synthetic main.\n//   The gophers party all night; the rabbits provide the beats.\n\n// Main is the principal entry point for a Managed VMs app.\n// It installs a trivial health checker if one isn't already registered,\n// and starts listening on port 8080 (overridden by the $PORT environment\n// variable).\n//\n// See https://cloud.google.com/appengine/docs/managed-vms/custom-runtimes#health_check_requests\n// for details on how to do your own health checking.\n//\n// Main never returns.\n//\n// Main is designed so that the app's main package looks like this:\n//\n//      package main\n//\n//      import (\n//              \"google.golang.org/appengine\"\n//\n//              _ \"myapp/package0\"\n//              _ \"myapp/package1\"\n//      )\n//\n//      func main() {\n//              appengine.Main()\n//      }\n//\n// The \"myapp/packageX\" packages are expected to register HTTP handlers\n// in their init functions.\nfunc Main() {\n\tinternal.Main()\n}\n\n// BackgroundContext returns a context not associated with a request.\n// This should only be used when not servicing a request.\n// This only works on Managed VMs.\nfunc BackgroundContext() context.Context {\n\treturn internal.BackgroundContext()\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/blobstore/blobstore.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// Package blobstore provides a client for App Engine's persistent blob\n// storage service.\npackage blobstore\n\nimport (\n\t\"bufio\"\n\t\"encoding/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"mime\"\n\t\"mime/multipart\"\n\t\"net/http\"\n\t\"net/textproto\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/datastore\"\n\t\"google.golang.org/appengine/internal\"\n\n\tbasepb \"google.golang.org/appengine/internal/base\"\n\tblobpb \"google.golang.org/appengine/internal/blobstore\"\n)\n\nconst (\n\tblobInfoKind      = \"__BlobInfo__\"\n\tblobFileIndexKind = \"__BlobFileIndex__\"\n\tzeroKey           = appengine.BlobKey(\"\")\n)\n\n// BlobInfo is the blob metadata that is stored in the datastore.\n// Filename may be empty.\ntype BlobInfo struct {\n\tBlobKey      appengine.BlobKey\n\tContentType  string    `datastore:\"content_type\"`\n\tCreationTime time.Time `datastore:\"creation\"`\n\tFilename     string    `datastore:\"filename\"`\n\tSize         int64     `datastore:\"size\"`\n\tMD5          string    `datastore:\"md5_hash\"`\n\n\t// ObjectName is the Google Cloud Storage name for this blob.\n\tObjectName string `datastore:\"gs_object_name\"`\n}\n\n// isErrFieldMismatch returns whether err is a datastore.ErrFieldMismatch.\n//\n// The blobstore stores blob metadata in the datastore. When loading that\n// metadata, it may contain fields that we don't care about. datastore.Get will\n// return datastore.ErrFieldMismatch in that case, so we ignore that specific\n// error.\nfunc isErrFieldMismatch(err error) bool {\n\t_, ok := err.(*datastore.ErrFieldMismatch)\n\treturn ok\n}\n\n// Stat returns the BlobInfo for a provided blobKey. 
If no blob was found for\n// that key, Stat returns datastore.ErrNoSuchEntity.\nfunc Stat(c context.Context, blobKey appengine.BlobKey) (*BlobInfo, error) {\n\tc, _ = appengine.Namespace(c, \"\") // Blobstore is always in the empty string namespace\n\tdskey := datastore.NewKey(c, blobInfoKind, string(blobKey), 0, nil)\n\tbi := &BlobInfo{\n\t\tBlobKey: blobKey,\n\t}\n\tif err := datastore.Get(c, dskey, bi); err != nil && !isErrFieldMismatch(err) {\n\t\treturn nil, err\n\t}\n\treturn bi, nil\n}\n\n// Send sets the headers on response to instruct App Engine to send a blob as\n// the response body. This is more efficient than reading and writing it out\n// manually and isn't subject to normal response size limits.\nfunc Send(response http.ResponseWriter, blobKey appengine.BlobKey) {\n\thdr := response.Header()\n\thdr.Set(\"X-AppEngine-BlobKey\", string(blobKey))\n\n\tif hdr.Get(\"Content-Type\") == \"\" {\n\t\t// This value is known to dev_appserver to mean automatic.\n\t\t// In production this is remapped to the empty value which\n\t\t// means automatic.\n\t\thdr.Set(\"Content-Type\", \"application/vnd.google.appengine.auto\")\n\t}\n}\n\n// UploadURL creates an upload URL for the form that the user will\n// fill out, passing the application path to load when the POST of the\n// form is completed. These URLs expire and should not be reused. 
The\n// opts parameter may be nil.\nfunc UploadURL(c context.Context, successPath string, opts *UploadURLOptions) (*url.URL, error) {\n\treq := &blobpb.CreateUploadURLRequest{\n\t\tSuccessPath: proto.String(successPath),\n\t}\n\tif opts != nil {\n\t\tif n := opts.MaxUploadBytes; n != 0 {\n\t\t\treq.MaxUploadSizeBytes = &n\n\t\t}\n\t\tif n := opts.MaxUploadBytesPerBlob; n != 0 {\n\t\t\treq.MaxUploadSizePerBlobBytes = &n\n\t\t}\n\t\tif s := opts.StorageBucket; s != \"\" {\n\t\t\treq.GsBucketName = &s\n\t\t}\n\t}\n\tres := &blobpb.CreateUploadURLResponse{}\n\tif err := internal.Call(c, \"blobstore\", \"CreateUploadURL\", req, res); err != nil {\n\t\treturn nil, err\n\t}\n\treturn url.Parse(*res.Url)\n}\n\n// UploadURLOptions are the options to create an upload URL.\ntype UploadURLOptions struct {\n\tMaxUploadBytes        int64 // optional\n\tMaxUploadBytesPerBlob int64 // optional\n\n\t// StorageBucket specifies the Google Cloud Storage bucket in which\n\t// to store the blob.\n\t// This is required if you use Cloud Storage instead of Blobstore.\n\t// Your application must have permission to write to the bucket.\n\t// You may optionally specify a bucket name and path in the format\n\t// \"bucket_name/path\", in which case the included path will be the\n\t// prefix of the uploaded object's name.\n\tStorageBucket string\n}\n\n// Delete deletes a blob.\nfunc Delete(c context.Context, blobKey appengine.BlobKey) error {\n\treturn DeleteMulti(c, []appengine.BlobKey{blobKey})\n}\n\n// DeleteMulti deletes multiple blobs.\nfunc DeleteMulti(c context.Context, blobKey []appengine.BlobKey) error {\n\ts := make([]string, len(blobKey))\n\tfor i, b := range blobKey {\n\t\ts[i] = string(b)\n\t}\n\treq := &blobpb.DeleteBlobRequest{\n\t\tBlobKey: s,\n\t}\n\tres := &basepb.VoidProto{}\n\tif err := internal.Call(c, \"blobstore\", \"DeleteBlob\", req, res); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc errorf(format string, args ...interface{}) error {\n\treturn 
fmt.Errorf(\"blobstore: \"+format, args...)\n}\n\n// ParseUpload parses the synthetic POST request that your app gets from\n// App Engine after a user's successful upload of blobs. Given the request,\n// ParseUpload returns a map of the blobs received (keyed by HTML form\n// element name) and other non-blob POST parameters.\nfunc ParseUpload(req *http.Request) (blobs map[string][]*BlobInfo, other url.Values, err error) {\n\t_, params, err := mime.ParseMediaType(req.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tboundary := params[\"boundary\"]\n\tif boundary == \"\" {\n\t\treturn nil, nil, errorf(\"did not find MIME multipart boundary\")\n\t}\n\n\tblobs = make(map[string][]*BlobInfo)\n\tother = make(url.Values)\n\n\tmreader := multipart.NewReader(io.MultiReader(req.Body, strings.NewReader(\"\\r\\n\\r\\n\")), boundary)\n\tfor {\n\t\tpart, perr := mreader.NextPart()\n\t\tif perr == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif perr != nil {\n\t\t\treturn nil, nil, errorf(\"error reading next mime part with boundary %q (len=%d): %v\",\n\t\t\t\tboundary, len(boundary), perr)\n\t\t}\n\n\t\tbi := &BlobInfo{}\n\t\tctype, params, err := mime.ParseMediaType(part.Header.Get(\"Content-Disposition\"))\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tbi.Filename = params[\"filename\"]\n\t\tformKey := params[\"name\"]\n\n\t\tctype, params, err = mime.ParseMediaType(part.Header.Get(\"Content-Type\"))\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tbi.BlobKey = appengine.BlobKey(params[\"blob-key\"])\n\t\tif ctype != \"message/external-body\" || bi.BlobKey == \"\" {\n\t\t\tif formKey != \"\" {\n\t\t\t\tslurp, serr := ioutil.ReadAll(part)\n\t\t\t\tif serr != nil {\n\t\t\t\t\treturn nil, nil, errorf(\"error reading %q MIME part\", formKey)\n\t\t\t\t}\n\t\t\t\tother[formKey] = append(other[formKey], string(slurp))\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// App Engine sends a MIME header as the body of each MIME part.\n\t\ttp := 
textproto.NewReader(bufio.NewReader(part))\n\t\theader, mimeerr := tp.ReadMIMEHeader()\n\t\tif mimeerr != nil {\n\t\t\treturn nil, nil, mimeerr\n\t\t}\n\t\tbi.Size, err = strconv.ParseInt(header.Get(\"Content-Length\"), 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tbi.ContentType = header.Get(\"Content-Type\")\n\n\t\t// Parse the time from the MIME header like:\n\t\t// X-AppEngine-Upload-Creation: 2011-03-15 21:38:34.712136\n\t\tcreateDate := header.Get(\"X-AppEngine-Upload-Creation\")\n\t\tif createDate == \"\" {\n\t\t\treturn nil, nil, errorf(\"expected to find an X-AppEngine-Upload-Creation header\")\n\t\t}\n\t\tbi.CreationTime, err = time.Parse(\"2006-01-02 15:04:05.000000\", createDate)\n\t\tif err != nil {\n\t\t\treturn nil, nil, errorf(\"error parsing X-AppEngine-Upload-Creation: %s\", err)\n\t\t}\n\n\t\tif hdr := header.Get(\"Content-MD5\"); hdr != \"\" {\n\t\t\tmd5, err := base64.URLEncoding.DecodeString(hdr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, errorf(\"bad Content-MD5 %q: %v\", hdr, err)\n\t\t\t}\n\t\t\tbi.MD5 = string(md5)\n\t\t}\n\n\t\t// If the GCS object name was provided, record it.\n\t\tbi.ObjectName = header.Get(\"X-AppEngine-Cloud-Storage-Object\")\n\n\t\tblobs[formKey] = append(blobs[formKey], bi)\n\t}\n\treturn\n}\n\n// Reader is a blob reader.\ntype Reader interface {\n\tio.Reader\n\tio.ReaderAt\n\tio.Seeker\n}\n\n// NewReader returns a reader for a blob. 
It always succeeds; if the blob does\n// not exist then an error will be reported upon first read.\nfunc NewReader(c context.Context, blobKey appengine.BlobKey) Reader {\n\treturn openBlob(c, blobKey)\n}\n\n// BlobKeyForFile returns a BlobKey for a Google Storage file.\n// The filename should be of the form \"/gs/bucket_name/object_name\".\nfunc BlobKeyForFile(c context.Context, filename string) (appengine.BlobKey, error) {\n\treq := &blobpb.CreateEncodedGoogleStorageKeyRequest{\n\t\tFilename: &filename,\n\t}\n\tres := &blobpb.CreateEncodedGoogleStorageKeyResponse{}\n\tif err := internal.Call(c, \"blobstore\", \"CreateEncodedGoogleStorageKey\", req, res); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn appengine.BlobKey(*res.BlobKey), nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/blobstore/read.go",
    "content": "// Copyright 2012 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage blobstore\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/internal\"\n\n\tblobpb \"google.golang.org/appengine/internal/blobstore\"\n)\n\n// openBlob returns a reader for a blob. It always succeeds; if the blob does\n// not exist then an error will be reported upon first read.\nfunc openBlob(c context.Context, blobKey appengine.BlobKey) Reader {\n\treturn &reader{\n\t\tc:       c,\n\t\tblobKey: blobKey,\n\t}\n}\n\nconst readBufferSize = 256 * 1024\n\n// reader is a blob reader. It implements the Reader interface.\ntype reader struct {\n\tc context.Context\n\n\t// Either blobKey or filename is set:\n\tblobKey  appengine.BlobKey\n\tfilename string\n\n\tcloseFunc func() // is nil if unavailable or already closed.\n\n\t// buf is the read buffer. 
r is how much of buf has been read.\n\t// off is the offset of buf[0] relative to the start of the blob.\n\t// An invariant is 0 <= r && r <= len(buf).\n\t// Reads that don't require an RPC call will increment r but not off.\n\t// Seeks may modify r without discarding the buffer, but only if the\n\t// invariant can be maintained.\n\tmu  sync.Mutex\n\tbuf []byte\n\tr   int\n\toff int64\n}\n\nfunc (r *reader) Close() error {\n\tif f := r.closeFunc; f != nil {\n\t\tf()\n\t}\n\tr.closeFunc = nil\n\treturn nil\n}\n\nfunc (r *reader) Read(p []byte) (int, error) {\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tif r.r == len(r.buf) {\n\t\tif err := r.fetch(r.off + int64(r.r)); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\tn := copy(p, r.buf[r.r:])\n\tr.r += n\n\treturn n, nil\n}\n\nfunc (r *reader) ReadAt(p []byte, off int64) (int, error) {\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\t// Convert relative offsets to absolute offsets.\n\tab0 := r.off + int64(r.r)\n\tab1 := r.off + int64(len(r.buf))\n\tap0 := off\n\tap1 := off + int64(len(p))\n\t// Check if we can satisfy the read entirely out of the existing buffer.\n\tif r.off <= ap0 && ap1 <= ab1 {\n\t\t// Convert off from an absolute offset to a relative offset.\n\t\trp0 := int(ap0 - r.off)\n\t\treturn copy(p, r.buf[rp0:]), nil\n\t}\n\t// Restore the original Read/Seek offset after ReadAt completes.\n\tdefer r.seek(ab0)\n\t// Repeatedly fetch and copy until we have filled p.\n\tn := 0\n\tfor len(p) > 0 {\n\t\tif err := r.fetch(off + int64(n)); err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tr.r = copy(p, r.buf)\n\t\tn += r.r\n\t\tp = p[r.r:]\n\t}\n\treturn n, nil\n}\n\nfunc (r *reader) Seek(offset int64, whence int) (ret int64, err error) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tswitch whence {\n\tcase os.SEEK_SET:\n\t\tret = offset\n\tcase os.SEEK_CUR:\n\t\tret = r.off + int64(r.r) + offset\n\tcase os.SEEK_END:\n\t\treturn 0, 
errors.New(\"seeking relative to the end of a blob isn't supported\")\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"invalid Seek whence value: %d\", whence)\n\t}\n\tif ret < 0 {\n\t\treturn 0, errors.New(\"negative Seek offset\")\n\t}\n\treturn r.seek(ret)\n}\n\n// fetch fetches readBufferSize bytes starting at the given offset. On success,\n// the data is saved as r.buf.\nfunc (r *reader) fetch(off int64) error {\n\treq := &blobpb.FetchDataRequest{\n\t\tBlobKey:    proto.String(string(r.blobKey)),\n\t\tStartIndex: proto.Int64(off),\n\t\tEndIndex:   proto.Int64(off + readBufferSize - 1), // EndIndex is inclusive.\n\t}\n\tres := &blobpb.FetchDataResponse{}\n\tif err := internal.Call(r.c, \"blobstore\", \"FetchData\", req, res); err != nil {\n\t\treturn err\n\t}\n\tif len(res.Data) == 0 {\n\t\treturn io.EOF\n\t}\n\tr.buf, r.r, r.off = res.Data, 0, off\n\treturn nil\n}\n\n// seek seeks to the given offset with an effective whence equal to SEEK_SET.\n// It discards the read buffer if the invariant cannot be maintained.\nfunc (r *reader) seek(off int64) (int64, error) {\n\tdelta := off - r.off\n\tif delta >= 0 && delta < int64(len(r.buf)) {\n\t\tr.r = int(delta)\n\t\treturn off, nil\n\t}\n\tr.buf, r.r, r.off = nil, 0, off\n\treturn off, nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/capability/capability.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n/*\nPackage capability exposes information about outages and scheduled downtime\nfor specific API capabilities.\n\nThis package does not work on Managed VMs.\n\nExample:\n\tif !capability.Enabled(c, \"datastore_v3\", \"write\") {\n\t\t// show user a different page\n\t}\n*/\npackage capability\n\nimport (\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n\t\"google.golang.org/appengine/log\"\n\n\tpb \"google.golang.org/appengine/internal/capability\"\n)\n\n// Enabled returns whether an API's capabilities are enabled.\n// The wildcard \"*\" capability matches every capability of an API.\n// If the underlying RPC fails (if the package is unknown, for example),\n// false is returned and information is written to the application log.\nfunc Enabled(ctx context.Context, api, capability string) bool {\n\treq := &pb.IsEnabledRequest{\n\t\tPackage:    &api,\n\t\tCapability: []string{capability},\n\t}\n\tres := &pb.IsEnabledResponse{}\n\tif err := internal.Call(ctx, \"capability_service\", \"IsEnabled\", req, res); err != nil {\n\t\tlog.Warningf(ctx, \"capability.Enabled: RPC failed: %v\", err)\n\t\treturn false\n\t}\n\tswitch *res.SummaryStatus {\n\tcase pb.IsEnabledResponse_ENABLED,\n\t\tpb.IsEnabledResponse_SCHEDULED_FUTURE,\n\t\tpb.IsEnabledResponse_SCHEDULED_NOW:\n\t\treturn true\n\tcase pb.IsEnabledResponse_UNKNOWN:\n\t\tlog.Errorf(ctx, \"capability.Enabled: unknown API capability %s/%s\", api, capability)\n\t\treturn false\n\tdefault:\n\t\treturn false\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/channel/channel.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n/*\nPackage channel implements the server side of App Engine's Channel API.\n\nCreate creates a new channel associated with the given clientID,\nwhich must be unique to the client that will use the returned token.\n\n\ttoken, err := channel.Create(c, \"player1\")\n\tif err != nil {\n\t\t// handle error\n\t}\n\t// return token to the client in an HTTP response\n\nSend sends a message to the client over the channel identified by clientID.\n\n\tchannel.Send(c, \"player1\", \"Game over!\")\n*/\npackage channel\n\nimport (\n\t\"encoding/json\"\n\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/internal\"\n\tbasepb \"google.golang.org/appengine/internal/base\"\n\tpb \"google.golang.org/appengine/internal/channel\"\n)\n\n// Create creates a channel and returns a token for use by the client.\n// The clientID is an application-provided string used to identify the client.\nfunc Create(c context.Context, clientID string) (token string, err error) {\n\treq := &pb.CreateChannelRequest{\n\t\tApplicationKey: &clientID,\n\t}\n\tresp := &pb.CreateChannelResponse{}\n\terr = internal.Call(c, service, \"CreateChannel\", req, resp)\n\ttoken = resp.GetToken()\n\treturn token, remapError(err)\n}\n\n// Send sends a message on the channel associated with clientID.\nfunc Send(c context.Context, clientID, message string) error {\n\treq := &pb.SendMessageRequest{\n\t\tApplicationKey: &clientID,\n\t\tMessage:        &message,\n\t}\n\tresp := &basepb.VoidProto{}\n\treturn remapError(internal.Call(c, service, \"SendChannelMessage\", req, resp))\n}\n\n// SendJSON is a helper function that sends a JSON-encoded value\n// on the channel associated with clientID.\nfunc SendJSON(c context.Context, clientID string, value interface{}) error {\n\tm, err := 
json.Marshal(value)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn Send(c, clientID, string(m))\n}\n\n// remapError fixes any APIError referencing \"xmpp\" into one referencing \"channel\".\nfunc remapError(err error) error {\n\tif e, ok := err.(*internal.APIError); ok {\n\t\tif e.Service == \"xmpp\" {\n\t\t\te.Service = \"channel\"\n\t\t}\n\t}\n\treturn err\n}\n\nvar service = \"xmpp\" // prod\n\nfunc init() {\n\tif appengine.IsDevAppServer() {\n\t\tservice = \"channel\" // dev\n\t}\n\tinternal.RegisterErrorCodeMap(\"channel\", pb.ChannelServiceError_ErrorCode_name)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/cloudsql/cloudsql.go",
    "content": "// Copyright 2013 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n/*\nPackage cloudsql exposes access to Google Cloud SQL databases.\n\nThis package does not work on Managed VMs.\n\nThis package is intended for MySQL drivers to make App Engine-specific\nconnections. Applications should use this package through database/sql:\nSelect a pure Go MySQL driver that supports this package, and use sql.Open\nwith protocol \"cloudsql\" and an address of the Cloud SQL instance.\n\nA Go MySQL driver that has been tested to work well with Cloud SQL\nis the go-sql-driver:\n\timport \"database/sql\"\n\timport _ \"github.com/go-sql-driver/mysql\"\n\n\tdb, err := sql.Open(\"mysql\", \"user@cloudsql(project-id:instance-name)/dbname\")\n\n\nAnother driver that works well with Cloud SQL is the mymysql driver:\n\timport \"database/sql\"\n\timport _ \"github.com/ziutek/mymysql/godrv\"\n\n\tdb, err := sql.Open(\"mymysql\", \"cloudsql:instance-name*dbname/user/password\")\n\n\nUsing either of these drivers, you can perform a standard SQL query.\nThis example assumes there is a table named 'users' with\ncolumns 'first_name' and 'last_name':\n\n\trows, err := db.Query(\"SELECT first_name, last_name FROM users\")\n\tif err != nil {\n\t\tlog.Errorf(ctx, \"db.Query: %v\", err)\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tvar firstName string\n\t\tvar lastName string\n\t\tif err := rows.Scan(&firstName, &lastName); err != nil {\n\t\t\tlog.Errorf(ctx, \"rows.Scan: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Infof(ctx, \"First: %v - Last: %v\", firstName, lastName)\n\t}\n\tif err := rows.Err(); err != nil {\n\t\tlog.Errorf(ctx, \"Row error: %v\", err)\n\t}\n*/\npackage cloudsql\n\nimport (\n\t\"net\"\n)\n\n// Dial connects to the named Cloud SQL instance.\nfunc Dial(instance string) (net.Conn, error) {\n\treturn connect(instance)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go",
    "content": "// Copyright 2013 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// +build appengine\n\npackage cloudsql\n\nimport (\n\t\"net\"\n\n\t\"appengine/cloudsql\"\n)\n\nfunc connect(instance string) (net.Conn, error) {\n\treturn cloudsql.Dial(instance)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go",
    "content": "// Copyright 2013 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// +build !appengine\n\npackage cloudsql\n\nimport (\n\t\"errors\"\n\t\"net\"\n)\n\nfunc connect(instance string) (net.Conn, error) {\n\treturn nil, errors.New(\"cloudsql: not supported in Managed VMs\")\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go",
    "content": "// Copyright 2015 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// Program aebundler turns a Go app into a fully self-contained tar file.\n// The app and its subdirectories (if any) are placed under \".\"\n// and the dependencies from $GOPATH are placed under ./_gopath/src.\n// A main func is synthesized if one does not exist.\n//\n// A sample Dockerfile to be used with this bundler could look like this:\n//     FROM gcr.io/google_appengine/go-compat\n//     ADD . /app\n//     RUN GOPATH=/app/_gopath go build -tags appenginevm -o /app/_ah/exe\npackage main\n\nimport (\n\t\"archive/tar\"\n\t\"flag\"\n\t\"fmt\"\n\t\"go/ast\"\n\t\"go/build\"\n\t\"go/parser\"\n\t\"go/token\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n)\n\nvar (\n\toutput  = flag.String(\"o\", \"\", \"name of output tar file or '-' for stdout\")\n\trootDir = flag.String(\"root\", \".\", \"directory name of application root\")\n\tvm      = flag.Bool(\"vm\", true, \"bundle a Managed VM app\")\n\n\tskipFiles = map[string]bool{\n\t\t\".git\":        true,\n\t\t\".gitconfig\":  true,\n\t\t\".hg\":         true,\n\t\t\".travis.yml\": true,\n\t}\n)\n\nconst (\n\tnewMain = `package main\nimport \"google.golang.org/appengine\"\nfunc main() {\n\tappengine.Main()\n}\n`\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"Usage of %s:\\n\", os.Args[0])\n\tfmt.Fprintf(os.Stderr, \"\\t%s -o <file.tar|->\\tBundle app to named tar file or stdout\\n\", os.Args[0])\n\tfmt.Fprintf(os.Stderr, \"\\noptional arguments:\\n\")\n\tflag.PrintDefaults()\n}\n\nfunc main() {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\tvar tags []string\n\tif *vm {\n\t\ttags = append(tags, \"appenginevm\")\n\t} else {\n\t\ttags = append(tags, \"appengine\")\n\t}\n\n\ttarFile := *output\n\tif tarFile == \"\" {\n\t\tusage()\n\t\terrorf(\"Required -o flag not specified.\")\n\t}\n\n\tapp, err := analyze(tags)\n\tif 
err != nil {\n\t\terrorf(\"Error analyzing app: %v\", err)\n\t}\n\tif err := app.bundle(tarFile); err != nil {\n\t\terrorf(\"Unable to bundle app: %v\", err)\n\t}\n}\n\n// errorf prints the error message and exits.\nfunc errorf(format string, a ...interface{}) {\n\tfmt.Fprintf(os.Stderr, \"aebundler: \"+format+\"\\n\", a...)\n\tos.Exit(1)\n}\n\ntype app struct {\n\thasMain  bool\n\tappFiles []string\n\timports  map[string]string\n}\n\n// analyze checks the app for building with the given build tags and returns hasMain,\n// app files, and a map of full directory import names to original import names.\nfunc analyze(tags []string) (*app, error) {\n\tctxt := buildContext(tags)\n\thasMain, appFiles, err := checkMain(ctxt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgopath := filepath.SplitList(ctxt.GOPATH)\n\tim, err := imports(ctxt, *rootDir, gopath)\n\treturn &app{\n\t\thasMain:  hasMain,\n\t\tappFiles: appFiles,\n\t\timports:  im,\n\t}, err\n}\n\n// buildContext returns the context for building the source.\nfunc buildContext(tags []string) *build.Context {\n\treturn &build.Context{\n\t\tGOARCH:    build.Default.GOARCH,\n\t\tGOOS:      build.Default.GOOS,\n\t\tGOROOT:    build.Default.GOROOT,\n\t\tGOPATH:    build.Default.GOPATH,\n\t\tCompiler:  build.Default.Compiler,\n\t\tBuildTags: append(build.Default.BuildTags, tags...),\n\t}\n}\n\n// bundle bundles the app into the named tarFile (\"-\"==stdout).\nfunc (s *app) bundle(tarFile string) (err error) {\n\tvar out io.Writer\n\tif tarFile == \"-\" {\n\t\tout = os.Stdout\n\t} else {\n\t\tf, err := os.Create(tarFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func() {\n\t\t\tif cerr := f.Close(); err == nil {\n\t\t\t\terr = cerr\n\t\t\t}\n\t\t}()\n\t\tout = f\n\t}\n\ttw := tar.NewWriter(out)\n\n\tfor srcDir, importName := range s.imports {\n\t\tdstDir := \"_gopath/src/\" + importName\n\t\tif err = copyTree(tw, dstDir, srcDir); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to copy directory %v to %v: 
%v\", srcDir, dstDir, err)\n\t\t}\n\t}\n\tif err := copyTree(tw, \".\", *rootDir); err != nil {\n\t\treturn fmt.Errorf(\"unable to copy root directory to /app: %v\", err)\n\t}\n\tif !s.hasMain {\n\t\tif err := synthesizeMain(tw, s.appFiles); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to synthesize new main func: %v\", err)\n\t\t}\n\t}\n\n\tif err := tw.Close(); err != nil {\n\t\treturn fmt.Errorf(\"unable to close tar file %v: %v\", tarFile, err)\n\t}\n\treturn nil\n}\n\n// synthesizeMain generates a new main func and writes it to the tarball.\nfunc synthesizeMain(tw *tar.Writer, appFiles []string) error {\n\tappMap := make(map[string]bool)\n\tfor _, f := range appFiles {\n\t\tappMap[f] = true\n\t}\n\tvar f string\n\tfor i := 0; i < 100; i++ {\n\t\tf = fmt.Sprintf(\"app_main%d.go\", i)\n\t\tif !appMap[filepath.Join(*rootDir, f)] {\n\t\t\tbreak\n\t\t}\n\t}\n\tif appMap[filepath.Join(*rootDir, f)] {\n\t\treturn fmt.Errorf(\"unable to find unique name for %v\", f)\n\t}\n\thdr := &tar.Header{\n\t\tName: f,\n\t\tMode: 0644,\n\t\tSize: int64(len(newMain)),\n\t}\n\tif err := tw.WriteHeader(hdr); err != nil {\n\t\treturn fmt.Errorf(\"unable to write header for %v: %v\", f, err)\n\t}\n\tif _, err := tw.Write([]byte(newMain)); err != nil {\n\t\treturn fmt.Errorf(\"unable to write %v to tar file: %v\", f, err)\n\t}\n\treturn nil\n}\n\n// imports returns a map of all import directories (recursively) used by the app.\n// The return value maps full directory names to original import names.\nfunc imports(ctxt *build.Context, srcDir string, gopath []string) (map[string]string, error) {\n\tpkg, err := ctxt.ImportDir(srcDir, 0)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to analyze source: %v\", err)\n\t}\n\n\t// Resolve all non-standard-library imports\n\tresult := make(map[string]string)\n\tfor _, v := range pkg.Imports {\n\t\tif !strings.Contains(v, \".\") {\n\t\t\tcontinue\n\t\t}\n\t\tsrc, err := findInGopath(v, gopath)\n\t\tif err != nil {\n\t\t\treturn nil, 
fmt.Errorf(\"unable to find import %v in gopath %v: %v\", v, gopath, err)\n\t\t}\n\t\tresult[src] = v\n\t\tim, err := imports(ctxt, src, gopath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to parse package %v: %v\", src, err)\n\t\t}\n\t\tfor k, v := range im {\n\t\t\tresult[k] = v\n\t\t}\n\t}\n\treturn result, nil\n}\n\n// findInGopath searches the gopath for the named import directory.\nfunc findInGopath(dir string, gopath []string) (string, error) {\n\tfor _, v := range gopath {\n\t\tdst := filepath.Join(v, \"src\", dir)\n\t\tif _, err := os.Stat(dst); err == nil {\n\t\t\treturn dst, nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"unable to find package %v in gopath %v\", dir, gopath)\n}\n\n// copyTree copies srcDir to tar file dstDir, ignoring skipFiles.\nfunc copyTree(tw *tar.Writer, dstDir, srcDir string) error {\n\tentries, err := ioutil.ReadDir(srcDir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to read dir %v: %v\", srcDir, err)\n\t}\n\tfor _, entry := range entries {\n\t\tn := entry.Name()\n\t\tif skipFiles[n] {\n\t\t\tcontinue\n\t\t}\n\t\ts := filepath.Join(srcDir, n)\n\t\td := filepath.Join(dstDir, n)\n\t\tif entry.IsDir() {\n\t\t\tif err := copyTree(tw, d, s); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to copy dir %v to %v: %v\", s, d, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif err := copyFile(tw, d, s); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to copy dir %v to %v: %v\", s, d, err)\n\t\t}\n\t}\n\treturn nil\n}\n\n// copyFile copies src to tar file dst.\nfunc copyFile(tw *tar.Writer, dst, src string) error {\n\ts, err := os.Open(src)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to open %v: %v\", src, err)\n\t}\n\tdefer s.Close()\n\tfi, err := s.Stat()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to stat %v: %v\", src, err)\n\t}\n\n\thdr, err := tar.FileInfoHeader(fi, dst)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create tar header for %v: %v\", dst, err)\n\t}\n\thdr.Name = dst\n\tif err := 
tw.WriteHeader(hdr); err != nil {\n\t\treturn fmt.Errorf(\"unable to write header for %v: %v\", dst, err)\n\t}\n\t_, err = io.Copy(tw, s)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to copy %v to %v: %v\", src, dst, err)\n\t}\n\treturn nil\n}\n\n// checkMain verifies that there is a single \"main\" function.\n// It also returns a list of all Go source files in the app.\nfunc checkMain(ctxt *build.Context) (bool, []string, error) {\n\tpkg, err := ctxt.ImportDir(*rootDir, 0)\n\tif err != nil {\n\t\treturn false, nil, fmt.Errorf(\"unable to analyze source: %v\", err)\n\t}\n\tif !pkg.IsCommand() {\n\t\terrorf(\"Your app's package needs to be changed from %q to \\\"main\\\".\\n\", pkg.Name)\n\t}\n\t// Search for a \"func main\"\n\tvar hasMain bool\n\tvar appFiles []string\n\tfor _, f := range pkg.GoFiles {\n\t\tn := filepath.Join(*rootDir, f)\n\t\tappFiles = append(appFiles, n)\n\t\tif hasMain, err = readFile(n); err != nil {\n\t\t\treturn false, nil, fmt.Errorf(\"error parsing %q: %v\", n, err)\n\t\t}\n\t}\n\treturn hasMain, appFiles, nil\n}\n\n// isMain returns whether the given function declaration is a main function.\n// Such a function must be called \"main\", not have a receiver, and have no arguments or return types.\nfunc isMain(f *ast.FuncDecl) bool {\n\tft := f.Type\n\treturn f.Name.Name == \"main\" && f.Recv == nil && ft.Params.NumFields() == 0 && ft.Results.NumFields() == 0\n}\n\n// readFile reads and parses the Go source code file and returns whether it has a main function.\nfunc readFile(filename string) (hasMain bool, err error) {\n\tvar src []byte\n\tsrc, err = ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn\n\t}\n\tfset := token.NewFileSet()\n\tfile, err := parser.ParseFile(fset, filename, src, 0)\n\tfor _, decl := range file.Decls {\n\t\tfuncDecl, ok := decl.(*ast.FuncDecl)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif !isMain(funcDecl) {\n\t\t\tcontinue\n\t\t}\n\t\thasMain = true\n\t\tbreak\n\t}\n\treturn\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go",
    "content": "// Copyright 2015 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// Program aedeploy assists with deploying Go Managed VM apps to production.\n// A temporary directory is created; the app, its subdirectories, and all its\n// dependencies from $GOPATH are copied into the directory; then the app\n// is deployed to production with the provided command.\n//\n// The app must be in \"package main\".\n//\n// This command must be issued from within the root directory of the app\n// (where the app.yaml file is located).\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"go/build\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"strings\"\n)\n\nvar (\n\tskipFiles = map[string]bool{\n\t\t\".git\":        true,\n\t\t\".gitconfig\":  true,\n\t\t\".hg\":         true,\n\t\t\".travis.yml\": true,\n\t}\n\n\tgopathCache = map[string]string{}\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"Usage of %s:\\n\", os.Args[0])\n\tfmt.Fprintf(os.Stderr, \"\\t%s gcloud --verbosity debug preview app deploy --version myversion ./app.yaml\\tDeploy app to production\\n\", os.Args[0])\n}\n\nfunc main() {\n\tflag.Usage = usage\n\tflag.Parse()\n\tif flag.NArg() < 1 {\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n\n\tif err := aedeploy(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, os.Args[0]+\": Error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc aedeploy() error {\n\ttags := []string{\"appenginevm\"}\n\tapp, err := analyze(tags)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttmpDir, err := app.bundle()\n\tif tmpDir != \"\" {\n\t\tdefer os.RemoveAll(tmpDir)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.Chdir(tmpDir); err != nil {\n\t\treturn fmt.Errorf(\"unable to chdir to %v: %v\", tmpDir, err)\n\t}\n\treturn deploy()\n}\n\n// deploy calls the provided command to deploy the app from the temporary directory.\nfunc deploy() error {\n\tcmd := 
exec.Command(flag.Arg(0), flag.Args()[1:]...)\n\tcmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn fmt.Errorf(\"unable to run %q: %v\", strings.Join(flag.Args(), \" \"), err)\n\t}\n\treturn nil\n}\n\ntype app struct {\n\tappFiles []string\n\timports  map[string]string\n}\n\n// analyze checks the app for building with the given build tags and returns\n// app files, and a map of full directory import names to original import names.\nfunc analyze(tags []string) (*app, error) {\n\tctxt := buildContext(tags)\n\tappFiles, err := appFiles(ctxt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgopath := filepath.SplitList(ctxt.GOPATH)\n\tim, err := imports(ctxt, \".\", gopath)\n\treturn &app{\n\t\tappFiles: appFiles,\n\t\timports:  im,\n\t}, err\n}\n\n// buildContext returns the context for building the source.\nfunc buildContext(tags []string) *build.Context {\n\treturn &build.Context{\n\t\tGOARCH:    \"amd64\",\n\t\tGOOS:      \"linux\",\n\t\tGOROOT:    build.Default.GOROOT,\n\t\tGOPATH:    build.Default.GOPATH,\n\t\tCompiler:  build.Default.Compiler,\n\t\tBuildTags: append(build.Default.BuildTags, tags...),\n\t}\n}\n\n// bundle bundles the app into a temporary directory.\nfunc (s *app) bundle() (tmpdir string, err error) {\n\tworkDir, err := ioutil.TempDir(\"\", \"aedeploy\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to create tmpdir: %v\", err)\n\t}\n\n\tfor srcDir, importName := range s.imports {\n\t\tdstDir := \"_gopath/src/\" + importName\n\t\tif err := copyTree(workDir, dstDir, srcDir); err != nil {\n\t\t\treturn workDir, fmt.Errorf(\"unable to copy directory %v to %v: %v\", srcDir, dstDir, err)\n\t\t}\n\t}\n\tif err := copyTree(workDir, \".\", \".\"); err != nil {\n\t\treturn workDir, fmt.Errorf(\"unable to copy root directory to /app: %v\", err)\n\t}\n\treturn workDir, nil\n}\n\n// imports returns a map of all import directories (recursively) used by the app.\n// The return value maps 
full directory names to original import names.\nfunc imports(ctxt *build.Context, srcDir string, gopath []string) (map[string]string, error) {\n\tpkg, err := ctxt.ImportDir(srcDir, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Resolve all non-standard-library imports\n\tresult := make(map[string]string)\n\tfor _, v := range pkg.Imports {\n\t\tif !strings.Contains(v, \".\") {\n\t\t\tcontinue\n\t\t}\n\t\tsrc, err := findInGopath(v, gopath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to find import %v in gopath %v: %v\", v, gopath, err)\n\t\t}\n\t\tif _, ok := result[src]; ok { // Already processed\n\t\t\tcontinue\n\t\t}\n\t\tresult[src] = v\n\t\tim, err := imports(ctxt, src, gopath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to parse package %v: %v\", src, err)\n\t\t}\n\t\tfor k, v := range im {\n\t\t\tresult[k] = v\n\t\t}\n\t}\n\treturn result, nil\n}\n\n// findInGopath searches the gopath for the named import directory.\nfunc findInGopath(dir string, gopath []string) (string, error) {\n\tif v, ok := gopathCache[dir]; ok {\n\t\treturn v, nil\n\t}\n\tfor _, v := range gopath {\n\t\tdst := filepath.Join(v, \"src\", dir)\n\t\tif _, err := os.Stat(dst); err == nil {\n\t\t\tgopathCache[dir] = dst\n\t\t\treturn dst, nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"unable to find package %v in gopath %v\", dir, gopath)\n}\n\n// copyTree copies srcDir to dstDir relative to dstRoot, ignoring skipFiles.\nfunc copyTree(dstRoot, dstDir, srcDir string) error {\n\td := filepath.Join(dstRoot, dstDir)\n\tif err := os.MkdirAll(d, 0755); err != nil {\n\t\treturn fmt.Errorf(\"unable to create directory %q: %v\", d, err)\n\t}\n\n\tentries, err := ioutil.ReadDir(srcDir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to read dir %q: %v\", srcDir, err)\n\t}\n\tfor _, entry := range entries {\n\t\tn := entry.Name()\n\t\tif skipFiles[n] {\n\t\t\tcontinue\n\t\t}\n\t\ts := filepath.Join(srcDir, n)\n\t\tif entry.Mode()&os.ModeSymlink == os.ModeSymlink 
{\n\t\t\tif entry, err = os.Stat(s); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to stat %v: %v\", s, err)\n\t\t\t}\n\t\t}\n\t\td := filepath.Join(dstDir, n)\n\t\tif entry.IsDir() {\n\t\t\tif err := copyTree(dstRoot, d, s); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to copy dir %q to %q: %v\", s, d, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif err := copyFile(dstRoot, d, s); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to copy dir %q to %q: %v\", s, d, err)\n\t\t}\n\t}\n\treturn nil\n}\n\n// copyFile copies src to dst relative to dstRoot.\nfunc copyFile(dstRoot, dst, src string) error {\n\ts, err := os.Open(src)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to open %q: %v\", src, err)\n\t}\n\tdefer s.Close()\n\n\tdst = filepath.Join(dstRoot, dst)\n\td, err := os.Create(dst)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create %q: %v\", dst, err)\n\t}\n\t_, err = io.Copy(d, s)\n\tif err != nil {\n\t\td.Close() // ignore error, copy already failed.\n\t\treturn fmt.Errorf(\"unable to copy %q to %q: %v\", src, dst, err)\n\t}\n\tif err := d.Close(); err != nil {\n\t\treturn fmt.Errorf(\"unable to close %q: %v\", dst, err)\n\t}\n\treturn nil\n}\n\n// appFiles returns a list of all Go source files in the app.\nfunc appFiles(ctxt *build.Context) ([]string, error) {\n\tpkg, err := ctxt.ImportDir(\".\", 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !pkg.IsCommand() {\n\t\treturn nil, fmt.Errorf(`the root of your app needs to be package \"main\" (currently %q). Please see https://cloud.google.com/appengine/docs/go/managed-vms for more details on structuring your app.`, pkg.Name)\n\t}\n\tvar appFiles []string\n\tfor _, f := range pkg.GoFiles {\n\t\tn := filepath.Join(\".\", f)\n\t\tappFiles = append(appFiles, n)\n\t}\n\treturn appFiles, nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/datastore/datastore.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage datastore\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/datastore\"\n)\n\nvar (\n\t// ErrInvalidEntityType is returned when functions like Get or Next are\n\t// passed a dst or src argument of invalid type.\n\tErrInvalidEntityType = errors.New(\"datastore: invalid entity type\")\n\t// ErrInvalidKey is returned when an invalid key is presented.\n\tErrInvalidKey = errors.New(\"datastore: invalid key\")\n\t// ErrNoSuchEntity is returned when no entity was found for a given key.\n\tErrNoSuchEntity = errors.New(\"datastore: no such entity\")\n)\n\n// ErrFieldMismatch is returned when a field is to be loaded into a different\n// type than the one it was stored from, or when a field is missing or\n// unexported in the destination struct.\n// StructType is the type of the struct pointed to by the destination argument\n// passed to Get or to Iterator.Next.\ntype ErrFieldMismatch struct {\n\tStructType reflect.Type\n\tFieldName  string\n\tReason     string\n}\n\nfunc (e *ErrFieldMismatch) Error() string {\n\treturn fmt.Sprintf(\"datastore: cannot load field %q into a %q: %s\",\n\t\te.FieldName, e.StructType, e.Reason)\n}\n\n// protoToKey converts a Reference proto to a *Key.\nfunc protoToKey(r *pb.Reference) (k *Key, err error) {\n\tappID := r.GetApp()\n\tnamespace := r.GetNameSpace()\n\tfor _, e := range r.Path.Element {\n\t\tk = &Key{\n\t\t\tkind:      e.GetType(),\n\t\t\tstringID:  e.GetName(),\n\t\t\tintID:     e.GetId(),\n\t\t\tparent:    k,\n\t\t\tappID:     appID,\n\t\t\tnamespace: namespace,\n\t\t}\n\t\tif !k.valid() {\n\t\t\treturn nil, ErrInvalidKey\n\t\t}\n\t}\n\treturn\n}\n\n// 
keyToProto converts a *Key to a Reference proto.\nfunc keyToProto(defaultAppID string, k *Key) *pb.Reference {\n\tappID := k.appID\n\tif appID == \"\" {\n\t\tappID = defaultAppID\n\t}\n\tn := 0\n\tfor i := k; i != nil; i = i.parent {\n\t\tn++\n\t}\n\te := make([]*pb.Path_Element, n)\n\tfor i := k; i != nil; i = i.parent {\n\t\tn--\n\t\te[n] = &pb.Path_Element{\n\t\t\tType: &i.kind,\n\t\t}\n\t\t// At most one of {Name,Id} should be set.\n\t\t// Neither will be set for incomplete keys.\n\t\tif i.stringID != \"\" {\n\t\t\te[n].Name = &i.stringID\n\t\t} else if i.intID != 0 {\n\t\t\te[n].Id = &i.intID\n\t\t}\n\t}\n\tvar namespace *string\n\tif k.namespace != \"\" {\n\t\tnamespace = proto.String(k.namespace)\n\t}\n\treturn &pb.Reference{\n\t\tApp:       proto.String(appID),\n\t\tNameSpace: namespace,\n\t\tPath: &pb.Path{\n\t\t\tElement: e,\n\t\t},\n\t}\n}\n\n// multiKeyToProto is a batch version of keyToProto.\nfunc multiKeyToProto(appID string, key []*Key) []*pb.Reference {\n\tret := make([]*pb.Reference, len(key))\n\tfor i, k := range key {\n\t\tret[i] = keyToProto(appID, k)\n\t}\n\treturn ret\n}\n\n// multiValid is a batch version of Key.valid. It returns an error, not a\n// []bool.\nfunc multiValid(key []*Key) error {\n\tinvalid := false\n\tfor _, k := range key {\n\t\tif !k.valid() {\n\t\t\tinvalid = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !invalid {\n\t\treturn nil\n\t}\n\terr := make(appengine.MultiError, len(key))\n\tfor i, k := range key {\n\t\tif !k.valid() {\n\t\t\terr[i] = ErrInvalidKey\n\t\t}\n\t}\n\treturn err\n}\n\n// It's unfortunate that the two semantically equivalent concepts pb.Reference\n// and pb.PropertyValue_ReferenceValue aren't the same type. 
For example, the\n// two have different protobuf field numbers.\n\n// referenceValueToKey is the same as protoToKey except the input is a\n// PropertyValue_ReferenceValue instead of a Reference.\nfunc referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) {\n\tappID := r.GetApp()\n\tnamespace := r.GetNameSpace()\n\tfor _, e := range r.Pathelement {\n\t\tk = &Key{\n\t\t\tkind:      e.GetType(),\n\t\t\tstringID:  e.GetName(),\n\t\t\tintID:     e.GetId(),\n\t\t\tparent:    k,\n\t\t\tappID:     appID,\n\t\t\tnamespace: namespace,\n\t\t}\n\t\tif !k.valid() {\n\t\t\treturn nil, ErrInvalidKey\n\t\t}\n\t}\n\treturn\n}\n\n// keyToReferenceValue is the same as keyToProto except the output is a\n// PropertyValue_ReferenceValue instead of a Reference.\nfunc keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue {\n\tref := keyToProto(defaultAppID, k)\n\tpe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element))\n\tfor i, e := range ref.Path.Element {\n\t\tpe[i] = &pb.PropertyValue_ReferenceValue_PathElement{\n\t\t\tType: e.Type,\n\t\t\tId:   e.Id,\n\t\t\tName: e.Name,\n\t\t}\n\t}\n\treturn &pb.PropertyValue_ReferenceValue{\n\t\tApp:         ref.App,\n\t\tNameSpace:   ref.NameSpace,\n\t\tPathelement: pe,\n\t}\n}\n\ntype multiArgType int\n\nconst (\n\tmultiArgTypeInvalid multiArgType = iota\n\tmultiArgTypePropertyLoadSaver\n\tmultiArgTypeStruct\n\tmultiArgTypeStructPtr\n\tmultiArgTypeInterface\n)\n\n// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct\n// type S, for some interface type I, or some non-interface non-pointer type P\n// such that P or *P implements PropertyLoadSaver.\n//\n// It returns what category the slice's elements are, and the reflect.Type\n// that represents S, I or P.\n//\n// As a special case, PropertyList is an invalid type for v.\nfunc checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {\n\tif v.Kind() != reflect.Slice {\n\t\treturn 
multiArgTypeInvalid, nil\n\t}\n\tif v.Type() == typeOfPropertyList {\n\t\treturn multiArgTypeInvalid, nil\n\t}\n\telemType = v.Type().Elem()\n\tif reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {\n\t\treturn multiArgTypePropertyLoadSaver, elemType\n\t}\n\tswitch elemType.Kind() {\n\tcase reflect.Struct:\n\t\treturn multiArgTypeStruct, elemType\n\tcase reflect.Interface:\n\t\treturn multiArgTypeInterface, elemType\n\tcase reflect.Ptr:\n\t\telemType = elemType.Elem()\n\t\tif elemType.Kind() == reflect.Struct {\n\t\t\treturn multiArgTypeStructPtr, elemType\n\t\t}\n\t}\n\treturn multiArgTypeInvalid, nil\n}\n\n// Get loads the entity stored for k into dst, which must be a struct pointer\n// or implement PropertyLoadSaver. If there is no such entity for the key, Get\n// returns ErrNoSuchEntity.\n//\n// The values of dst's unmatched struct fields are not modified, and matching\n// slice-typed fields are not reset before appending to them. In particular, it\n// is recommended to pass a pointer to a zero valued struct on each Get call.\n//\n// ErrFieldMismatch is returned when a field is to be loaded into a different\n// type than the one it was stored from, or when a field is missing or\n// unexported in the destination struct. ErrFieldMismatch is only returned if\n// dst is a struct pointer.\nfunc Get(c context.Context, key *Key, dst interface{}) error {\n\tif dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here\n\t\treturn ErrInvalidEntityType\n\t}\n\terr := GetMulti(c, []*Key{key}, []interface{}{dst})\n\tif me, ok := err.(appengine.MultiError); ok {\n\t\treturn me[0]\n\t}\n\treturn err\n}\n\n// GetMulti is a batch version of Get.\n//\n// dst must be a []S, []*S, []I or []P, for some struct type S, some interface\n// type I, or some non-interface non-pointer type P such that P or *P\n// implements PropertyLoadSaver. 
If an []I, each element must be a valid dst\n// for Get: it must be a struct pointer or implement PropertyLoadSaver.\n//\n// As a special case, PropertyList is an invalid type for dst, even though a\n// PropertyList is a slice of structs. It is treated as invalid to avoid being\n// mistakenly passed when []PropertyList was intended.\nfunc GetMulti(c context.Context, key []*Key, dst interface{}) error {\n\tv := reflect.ValueOf(dst)\n\tmultiArgType, _ := checkMultiArg(v)\n\tif multiArgType == multiArgTypeInvalid {\n\t\treturn errors.New(\"datastore: dst has invalid type\")\n\t}\n\tif len(key) != v.Len() {\n\t\treturn errors.New(\"datastore: key and dst slices have different length\")\n\t}\n\tif len(key) == 0 {\n\t\treturn nil\n\t}\n\tif err := multiValid(key); err != nil {\n\t\treturn err\n\t}\n\treq := &pb.GetRequest{\n\t\tKey: multiKeyToProto(internal.FullyQualifiedAppID(c), key),\n\t}\n\tres := &pb.GetResponse{}\n\tif err := internal.Call(c, \"datastore_v3\", \"Get\", req, res); err != nil {\n\t\treturn err\n\t}\n\tif len(key) != len(res.Entity) {\n\t\treturn errors.New(\"datastore: internal error: server returned the wrong number of entities\")\n\t}\n\tmultiErr, any := make(appengine.MultiError, len(key)), false\n\tfor i, e := range res.Entity {\n\t\tif e.Entity == nil {\n\t\t\tmultiErr[i] = ErrNoSuchEntity\n\t\t} else {\n\t\t\telem := v.Index(i)\n\t\t\tif multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {\n\t\t\t\telem = elem.Addr()\n\t\t\t}\n\t\t\tif multiArgType == multiArgTypeStructPtr && elem.IsNil() {\n\t\t\t\telem.Set(reflect.New(elem.Type().Elem()))\n\t\t\t}\n\t\t\tmultiErr[i] = loadEntity(elem.Interface(), e.Entity)\n\t\t}\n\t\tif multiErr[i] != nil {\n\t\t\tany = true\n\t\t}\n\t}\n\tif any {\n\t\treturn multiErr\n\t}\n\treturn nil\n}\n\n// Put saves the entity src into the datastore with key k. 
src must be a struct\n// pointer or implement PropertyLoadSaver; if a struct pointer then any\n// unexported fields of that struct will be skipped. If k is an incomplete key,\n// the returned key will be a unique key generated by the datastore.\nfunc Put(c context.Context, key *Key, src interface{}) (*Key, error) {\n\tk, err := PutMulti(c, []*Key{key}, []interface{}{src})\n\tif err != nil {\n\t\tif me, ok := err.(appengine.MultiError); ok {\n\t\t\treturn nil, me[0]\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn k[0], nil\n}\n\n// PutMulti is a batch version of Put.\n//\n// src must satisfy the same conditions as the dst argument to GetMulti.\nfunc PutMulti(c context.Context, key []*Key, src interface{}) ([]*Key, error) {\n\tv := reflect.ValueOf(src)\n\tmultiArgType, _ := checkMultiArg(v)\n\tif multiArgType == multiArgTypeInvalid {\n\t\treturn nil, errors.New(\"datastore: src has invalid type\")\n\t}\n\tif len(key) != v.Len() {\n\t\treturn nil, errors.New(\"datastore: key and src slices have different length\")\n\t}\n\tif len(key) == 0 {\n\t\treturn nil, nil\n\t}\n\tappID := internal.FullyQualifiedAppID(c)\n\tif err := multiValid(key); err != nil {\n\t\treturn nil, err\n\t}\n\treq := &pb.PutRequest{}\n\tfor i := range key {\n\t\telem := v.Index(i)\n\t\tif multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {\n\t\t\telem = elem.Addr()\n\t\t}\n\t\tsProto, err := saveEntity(appID, key[i], elem.Interface())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treq.Entity = append(req.Entity, sProto)\n\t}\n\tres := &pb.PutResponse{}\n\tif err := internal.Call(c, \"datastore_v3\", \"Put\", req, res); err != nil {\n\t\treturn nil, err\n\t}\n\tif len(key) != len(res.Key) {\n\t\treturn nil, errors.New(\"datastore: internal error: server returned the wrong number of keys\")\n\t}\n\tret := make([]*Key, len(key))\n\tfor i := range ret {\n\t\tvar err error\n\t\tret[i], err = protoToKey(res.Key[i])\n\t\tif err != nil || ret[i].Incomplete() 
{\n\t\t\treturn nil, errors.New(\"datastore: internal error: server returned an invalid key\")\n\t\t}\n\t}\n\treturn ret, nil\n}\n\n// Delete deletes the entity for the given key.\nfunc Delete(c context.Context, key *Key) error {\n\terr := DeleteMulti(c, []*Key{key})\n\tif me, ok := err.(appengine.MultiError); ok {\n\t\treturn me[0]\n\t}\n\treturn err\n}\n\n// DeleteMulti is a batch version of Delete.\nfunc DeleteMulti(c context.Context, key []*Key) error {\n\tif len(key) == 0 {\n\t\treturn nil\n\t}\n\tif err := multiValid(key); err != nil {\n\t\treturn err\n\t}\n\treq := &pb.DeleteRequest{\n\t\tKey: multiKeyToProto(internal.FullyQualifiedAppID(c), key),\n\t}\n\tres := &pb.DeleteResponse{}\n\treturn internal.Call(c, \"datastore_v3\", \"Delete\", req, res)\n}\n\nfunc namespaceMod(m proto.Message, namespace string) {\n\t// pb.Query is the only type that has a name_space field.\n\t// All other namespace support in datastore is in the keys.\n\tswitch m := m.(type) {\n\tcase *pb.Query:\n\t\tif m.NameSpace == nil {\n\t\t\tm.NameSpace = &namespace\n\t\t}\n\t}\n}\n\nfunc init() {\n\tinternal.NamespaceMods[\"datastore_v3\"] = namespaceMod\n\tinternal.RegisterErrorCodeMap(\"datastore_v3\", pb.Error_ErrorCode_name)\n\tinternal.RegisterTimeoutErrorCode(\"datastore_v3\", int32(pb.Error_TIMEOUT))\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/datastore/doc.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n/*\nPackage datastore provides a client for App Engine's datastore service.\n\n\nBasic Operations\n\nEntities are the unit of storage and are associated with a key. A key\nconsists of an optional parent key, a string application ID, a string kind\n(also known as an entity type), and either a StringID or an IntID. A\nStringID is also known as an entity name or key name.\n\nIt is valid to create a key with a zero StringID and a zero IntID; this is\ncalled an incomplete key, and does not refer to any saved entity. Putting an\nentity into the datastore under an incomplete key will cause a unique key\nto be generated for that entity, with a non-zero IntID.\n\nAn entity's contents are a mapping from case-sensitive field names to values.\nValid value types are:\n  - signed integers (int, int8, int16, int32 and int64),\n  - bool,\n  - string,\n  - float32 and float64,\n  - []byte (up to 1 megabyte in length),\n  - any type whose underlying type is one of the above predeclared types,\n  - ByteString,\n  - *Key,\n  - time.Time (stored with microsecond precision),\n  - appengine.BlobKey,\n  - appengine.GeoPoint,\n  - structs whose fields are all valid value types,\n  - slices of any of the above.\n\nSlices of structs are valid, as are structs that contain slices. However, if\none struct contains another, then at most one of those can be repeated. This\ndisqualifies recursively defined struct types: any struct T that (directly or\nindirectly) contains a []T.\n\nThe Get and Put functions load and save an entity's contents. 
An entity's\ncontents are typically represented by a struct pointer.\n\nExample code:\n\n\ttype Entity struct {\n\t\tValue string\n\t}\n\n\tfunc handle(w http.ResponseWriter, r *http.Request) {\n\t\tctx := appengine.NewContext(r)\n\n\t\tk := datastore.NewKey(ctx, \"Entity\", \"stringID\", 0, nil)\n\t\te := new(Entity)\n\t\tif err := datastore.Get(ctx, k, e); err != nil {\n\t\t\thttp.Error(w, err.Error(), 500)\n\t\t\treturn\n\t\t}\n\n\t\told := e.Value\n\t\te.Value = r.URL.Path\n\n\t\tif _, err := datastore.Put(ctx, k, e); err != nil {\n\t\t\thttp.Error(w, err.Error(), 500)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t\tfmt.Fprintf(w, \"old=%q\\nnew=%q\\n\", old, e.Value)\n\t}\n\nGetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and\nDelete functions. They take a []*Key instead of a *Key, and may return an\nappengine.MultiError when encountering partial failure.\n\n\nProperties\n\nAn entity's contents can be represented by a variety of types. These are\ntypically struct pointers, but can also be any type that implements the\nPropertyLoadSaver interface. If using a struct pointer, you do not have to\nexplicitly implement the PropertyLoadSaver interface; the datastore will\nautomatically convert via reflection. If a struct pointer does implement that\ninterface then those methods will be used in preference to the default\nbehavior for struct pointers. Struct pointers are more strongly typed and are\neasier to use; PropertyLoadSavers are more flexible.\n\nThe actual types passed do not have to match between Get and Put calls or even\nacross different App Engine requests. It is valid to put a *PropertyList and\nget that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1.\nConceptually, any entity is saved as a sequence of properties, and is loaded\ninto the destination value on a property-by-property basis. 
When loading into\na struct pointer, an entity that cannot be completely represented (such as a\nmissing field) will result in an ErrFieldMismatch error but it is up to the\ncaller whether this error is fatal, recoverable or ignorable.\n\nBy default, for struct pointers, all properties are potentially indexed, and\nthe property name is the same as the field name (and hence must start with an\nupper case letter). Fields may have a `datastore:\"name,options\"` tag. The tag\nname is the property name, which must be one or more valid Go identifiers\njoined by \".\", but may start with a lower case letter. An empty tag name means\nto just use the field name. A \"-\" tag name means that the datastore will\nignore that field. If options is \"noindex\" then the field will not be indexed.\nIf the options is \"\" then the comma may be omitted. There are no other\nrecognized options.\n\nFields (except for []byte) are indexed by default. Strings longer than 1500\nbytes cannot be indexed; fields used to store long strings should be\ntagged with \"noindex\". Similarly, ByteStrings longer than 1500 bytes cannot be\nindexed.\n\nExample code:\n\n\t// A and B are renamed to a and b.\n\t// A, C and J are not indexed.\n\t// D's tag is equivalent to having no tag at all (E).\n\t// I is ignored entirely by the datastore.\n\t// J has tag information for both the datastore and json packages.\n\ttype TaggedStruct struct {\n\t\tA int `datastore:\"a,noindex\"`\n\t\tB int `datastore:\"b\"`\n\t\tC int `datastore:\",noindex\"`\n\t\tD int `datastore:\"\"`\n\t\tE int\n\t\tI int `datastore:\"-\"`\n\t\tJ int `datastore:\",noindex\" json:\"j\"`\n\t}\n\n\nStructured Properties\n\nIf the struct pointed to contains other structs, then the nested or embedded\nstructs are flattened. 
For example, given these definitions:\n\n\ttype Inner1 struct {\n\t\tW int32\n\t\tX string\n\t}\n\n\ttype Inner2 struct {\n\t\tY float64\n\t}\n\n\ttype Inner3 struct {\n\t\tZ bool\n\t}\n\n\ttype Outer struct {\n\t\tA int16\n\t\tI []Inner1\n\t\tJ Inner2\n\t\tInner3\n\t}\n\nthen an Outer's properties would be equivalent to those of:\n\n\ttype OuterEquivalent struct {\n\t\tA     int16\n\t\tIDotW []int32  `datastore:\"I.W\"`\n\t\tIDotX []string `datastore:\"I.X\"`\n\t\tJDotY float64  `datastore:\"J.Y\"`\n\t\tZ     bool\n\t}\n\nIf Outer's embedded Inner3 field was tagged as `datastore:\"Foo\"` then the\nequivalent field would instead be: FooDotZ bool `datastore:\"Foo.Z\"`.\n\nIf an outer struct is tagged \"noindex\" then all of its implicit flattened\nfields are effectively \"noindex\".\n\n\nThe PropertyLoadSaver Interface\n\nAn entity's contents can also be represented by any type that implements the\nPropertyLoadSaver interface. This type may be a struct pointer, but it does\nnot have to be. The datastore package will call Load when getting the entity's\ncontents, and Save when putting the entity's contents.\nPossible uses include deriving non-stored fields, verifying fields, or indexing\na field only if its value is positive.\n\nExample code:\n\n\ttype CustomPropsExample struct {\n\t\tI, J int\n\t\t// Sum is not stored, but should always be equal to I + J.\n\t\tSum int `datastore:\"-\"`\n\t}\n\n\tfunc (x *CustomPropsExample) Load(ps []datastore.Property) error {\n\t\t// Load I and J as usual.\n\t\tif err := datastore.LoadStruct(x, ps); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Derive the Sum field.\n\t\tx.Sum = x.I + x.J\n\t\treturn nil\n\t}\n\n\tfunc (x *CustomPropsExample) Save() ([]datastore.Property, error) {\n\t\t// Validate the Sum field.\n\t\tif x.Sum != x.I + x.J {\n\t\t\treturn errors.New(\"CustomPropsExample has inconsistent sum\")\n\t\t}\n\t\t// Save I and J as usual. 
The code below is equivalent to calling\n\t\t// \"return datastore.SaveStruct(x)\", but is done manually for\n\t\t// demonstration purposes.\n\t\treturn []datastore.Property{\n\t\t\t{\n\t\t\t\tName:  \"I\",\n\t\t\t\tValue: int64(x.I),\n\t\t\t},\n\t\t\t{\n\t\t\t\tName:  \"J\",\n\t\t\t\tValue: int64(x.J),\n\t\t\t},\n\t\t}\n\t}\n\nThe *PropertyList type implements PropertyLoadSaver, and can therefore hold an\narbitrary entity's contents.\n\n\nQueries\n\nQueries retrieve entities based on their properties or key's ancestry. Running\na query yields an iterator of results: either keys or (key, entity) pairs.\nQueries are re-usable and it is safe to call Query.Run from concurrent\ngoroutines. Iterators are not safe for concurrent use.\n\nQueries are immutable, and are either created by calling NewQuery, or derived\nfrom an existing query by calling a method like Filter or Order that returns a\nnew query value. A query is typically constructed by calling NewQuery followed\nby a chain of zero or more such methods. These methods are:\n  - Ancestor and Filter constrain the entities returned by running a query.\n  - Order affects the order in which they are returned.\n  - Project constrains the fields returned.\n  - Distinct de-duplicates projected entities.\n  - KeysOnly makes the iterator return only keys, not (key, entity) pairs.\n  - Start, End, Offset and Limit define which sub-sequence of matching entities\n    to return. Start and End take cursors, Offset and Limit take integers. Start\n    and Offset affect the first result, End and Limit affect the last result.\n    If both Start and Offset are set, then the offset is relative to Start.\n    If both End and Limit are set, then the earliest constraint wins. Limit is\n    relative to Start+Offset, not relative to End. 
As a special case, a\n    negative limit means unlimited.\n\nExample code:\n\n\ttype Widget struct {\n\t\tDescription string\n\t\tPrice       int\n\t}\n\n\tfunc handle(w http.ResponseWriter, r *http.Request) {\n\t\tctx := appengine.NewContext(r)\n\t\tq := datastore.NewQuery(\"Widget\").\n\t\t\tFilter(\"Price <\", 1000).\n\t\t\tOrder(\"-Price\")\n\t\tb := new(bytes.Buffer)\n\t\tfor t := q.Run(ctx); ; {\n\t\t\tvar x Widget\n\t\t\tkey, err := t.Next(&x)\n\t\t\tif err == datastore.Done {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tserveError(ctx, w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Fprintf(b, \"Key=%v\\nWidget=%#v\\n\\n\", key, x)\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t\tio.Copy(w, b)\n\t}\n\n\nTransactions\n\nRunInTransaction runs a function in a transaction.\n\nExample code:\n\n\ttype Counter struct {\n\t\tCount int\n\t}\n\n\tfunc inc(ctx context.Context, key *datastore.Key) (int, error) {\n\t\tvar x Counter\n\t\tif err := datastore.Get(ctx, key, &x); err != nil && err != datastore.ErrNoSuchEntity {\n\t\t\treturn 0, err\n\t\t}\n\t\tx.Count++\n\t\tif _, err := datastore.Put(ctx, key, &x); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn x.Count, nil\n\t}\n\n\tfunc handle(w http.ResponseWriter, r *http.Request) {\n\t\tctx := appengine.NewContext(r)\n\t\tvar count int\n\t\terr := datastore.RunInTransaction(ctx, func(ctx context.Context) error {\n\t\t\tvar err1 error\n\t\t\tcount, err1 = inc(ctx, datastore.NewKey(ctx, \"Counter\", \"singleton\", 0, nil))\n\t\t\treturn err1\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\tserveError(ctx, w, err)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t\tfmt.Fprintf(w, \"Count=%d\", count)\n\t}\n\n\nMetadata\n\nThe datastore package provides access to some of App Engine's datastore\nmetadata. 
This metadata includes information about the entity groups,\nnamespaces, entity kinds, and properties in the datastore, as well as the\nproperty representations for each property.\n\nExample code:\n\n\tfunc handle(w http.ResponseWriter, r *http.Request) {\n\t\t// Print all the kinds in the datastore, with all the indexed\n\t\t// properties (and their representations) for each.\n\t\tctx := appengine.NewContext(r)\n\n\t\tkinds, err := datastore.Kinds(ctx)\n\t\tif err != nil {\n\t\t\tserveError(ctx, w, err)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t\tfor _, kind := range kinds {\n\t\t\tfmt.Fprintf(w, \"%s:\\n\", kind)\n\t\t\tprops, err := datastore.KindProperties(ctx, kind)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintln(w, \"\\t(unable to retrieve properties)\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor p, rep := range props {\n\t\t\t\tfmt.Fprintf(w, \"\\t-%s (%s)\\n\", p, strings.Join(rep, \", \"))\n\t\t\t}\n\t\t}\n\t}\n*/\npackage datastore\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/datastore/key.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage datastore\n\nimport (\n\t\"bytes\"\n\t\"encoding/base64\"\n\t\"encoding/gob\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/datastore\"\n)\n\n// Key represents the datastore key for a stored entity, and is immutable.\ntype Key struct {\n\tkind      string\n\tstringID  string\n\tintID     int64\n\tparent    *Key\n\tappID     string\n\tnamespace string\n}\n\n// Kind returns the key's kind (also known as entity type).\nfunc (k *Key) Kind() string {\n\treturn k.kind\n}\n\n// StringID returns the key's string ID (also known as an entity name or key\n// name), which may be \"\".\nfunc (k *Key) StringID() string {\n\treturn k.stringID\n}\n\n// IntID returns the key's integer ID, which may be 0.\nfunc (k *Key) IntID() int64 {\n\treturn k.intID\n}\n\n// Parent returns the key's parent key, which may be nil.\nfunc (k *Key) Parent() *Key {\n\treturn k.parent\n}\n\n// AppID returns the key's application ID.\nfunc (k *Key) AppID() string {\n\treturn k.appID\n}\n\n// Namespace returns the key's namespace.\nfunc (k *Key) Namespace() string {\n\treturn k.namespace\n}\n\n// Incomplete returns whether the key does not refer to a stored entity.\n// In particular, whether the key has a zero StringID and a zero IntID.\nfunc (k *Key) Incomplete() bool {\n\treturn k.stringID == \"\" && k.intID == 0\n}\n\n// valid returns whether the key is valid.\nfunc (k *Key) valid() bool {\n\tif k == nil {\n\t\treturn false\n\t}\n\tfor ; k != nil; k = k.parent {\n\t\tif k.kind == \"\" || k.appID == \"\" {\n\t\t\treturn false\n\t\t}\n\t\tif k.stringID != \"\" && k.intID != 0 {\n\t\t\treturn false\n\t\t}\n\t\tif k.parent != nil {\n\t\t\tif 
k.parent.Incomplete() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif k.parent.appID != k.appID || k.parent.namespace != k.namespace {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\n// Equal returns whether two keys are equal.\nfunc (k *Key) Equal(o *Key) bool {\n\tfor k != nil && o != nil {\n\t\tif k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace {\n\t\t\treturn false\n\t\t}\n\t\tk, o = k.parent, o.parent\n\t}\n\treturn k == o\n}\n\n// root returns the furthest ancestor of a key, which may be itself.\nfunc (k *Key) root() *Key {\n\tfor k.parent != nil {\n\t\tk = k.parent\n\t}\n\treturn k\n}\n\n// marshal marshals the key's string representation to the buffer.\nfunc (k *Key) marshal(b *bytes.Buffer) {\n\tif k.parent != nil {\n\t\tk.parent.marshal(b)\n\t}\n\tb.WriteByte('/')\n\tb.WriteString(k.kind)\n\tb.WriteByte(',')\n\tif k.stringID != \"\" {\n\t\tb.WriteString(k.stringID)\n\t} else {\n\t\tb.WriteString(strconv.FormatInt(k.intID, 10))\n\t}\n}\n\n// String returns a string representation of the key.\nfunc (k *Key) String() string {\n\tif k == nil {\n\t\treturn \"\"\n\t}\n\tb := bytes.NewBuffer(make([]byte, 0, 512))\n\tk.marshal(b)\n\treturn b.String()\n}\n\ntype gobKey struct {\n\tKind      string\n\tStringID  string\n\tIntID     int64\n\tParent    *gobKey\n\tAppID     string\n\tNamespace string\n}\n\nfunc keyToGobKey(k *Key) *gobKey {\n\tif k == nil {\n\t\treturn nil\n\t}\n\treturn &gobKey{\n\t\tKind:      k.kind,\n\t\tStringID:  k.stringID,\n\t\tIntID:     k.intID,\n\t\tParent:    keyToGobKey(k.parent),\n\t\tAppID:     k.appID,\n\t\tNamespace: k.namespace,\n\t}\n}\n\nfunc gobKeyToKey(gk *gobKey) *Key {\n\tif gk == nil {\n\t\treturn nil\n\t}\n\treturn &Key{\n\t\tkind:      gk.Kind,\n\t\tstringID:  gk.StringID,\n\t\tintID:     gk.IntID,\n\t\tparent:    gobKeyToKey(gk.Parent),\n\t\tappID:     gk.AppID,\n\t\tnamespace: gk.Namespace,\n\t}\n}\n\nfunc (k *Key) GobEncode() ([]byte, error) 
{\n\tbuf := new(bytes.Buffer)\n\tif err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}\n\nfunc (k *Key) GobDecode(buf []byte) error {\n\tgk := new(gobKey)\n\tif err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {\n\t\treturn err\n\t}\n\t*k = *gobKeyToKey(gk)\n\treturn nil\n}\n\nfunc (k *Key) MarshalJSON() ([]byte, error) {\n\treturn []byte(`\"` + k.Encode() + `\"`), nil\n}\n\nfunc (k *Key) UnmarshalJSON(buf []byte) error {\n\tif len(buf) < 2 || buf[0] != '\"' || buf[len(buf)-1] != '\"' {\n\t\treturn errors.New(\"datastore: bad JSON key\")\n\t}\n\tk2, err := DecodeKey(string(buf[1 : len(buf)-1]))\n\tif err != nil {\n\t\treturn err\n\t}\n\t*k = *k2\n\treturn nil\n}\n\n// Encode returns an opaque representation of the key\n// suitable for use in HTML and URLs.\n// This is compatible with the Python and Java runtimes.\nfunc (k *Key) Encode() string {\n\tref := keyToProto(\"\", k)\n\n\tb, err := proto.Marshal(ref)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Trailing padding is stripped.\n\treturn strings.TrimRight(base64.URLEncoding.EncodeToString(b), \"=\")\n}\n\n// DecodeKey decodes a key from the opaque representation returned by Encode.\nfunc DecodeKey(encoded string) (*Key, error) {\n\t// Re-add padding.\n\tif m := len(encoded) % 4; m != 0 {\n\t\tencoded += strings.Repeat(\"=\", 4-m)\n\t}\n\n\tb, err := base64.URLEncoding.DecodeString(encoded)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tref := new(pb.Reference)\n\tif err := proto.Unmarshal(b, ref); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn protoToKey(ref)\n}\n\n// NewIncompleteKey creates a new incomplete key.\n// kind cannot be empty.\nfunc NewIncompleteKey(c context.Context, kind string, parent *Key) *Key {\n\treturn NewKey(c, kind, \"\", 0, parent)\n}\n\n// NewKey creates a new key.\n// kind cannot be empty.\n// Either one or both of stringID and intID must be zero. 
If both are zero,\n// the key returned is incomplete.\n// parent must either be a complete key or nil.\nfunc NewKey(c context.Context, kind, stringID string, intID int64, parent *Key) *Key {\n\t// If there's a parent key, use its namespace.\n\t// Otherwise, use any namespace attached to the context.\n\tvar namespace string\n\tif parent != nil {\n\t\tnamespace = parent.namespace\n\t} else {\n\t\tnamespace = internal.NamespaceFromContext(c)\n\t}\n\n\treturn &Key{\n\t\tkind:      kind,\n\t\tstringID:  stringID,\n\t\tintID:     intID,\n\t\tparent:    parent,\n\t\tappID:     internal.FullyQualifiedAppID(c),\n\t\tnamespace: namespace,\n\t}\n}\n\n// AllocateIDs returns a range of n integer IDs with the given kind and parent\n// combination. kind cannot be empty; parent may be nil. The IDs in the range\n// returned will not be used by the datastore's automatic ID sequence generator\n// and may be used with NewKey without conflict.\n//\n// The range is inclusive at the low end and exclusive at the high end. In\n// other words, valid intIDs x satisfy low <= x && x < high.\n//\n// If no error is returned, low + n == high.\nfunc AllocateIDs(c context.Context, kind string, parent *Key, n int) (low, high int64, err error) {\n\tif kind == \"\" {\n\t\treturn 0, 0, errors.New(\"datastore: AllocateIDs given an empty kind\")\n\t}\n\tif n < 0 {\n\t\treturn 0, 0, fmt.Errorf(\"datastore: AllocateIDs given a negative count: %d\", n)\n\t}\n\tif n == 0 {\n\t\treturn 0, 0, nil\n\t}\n\treq := &pb.AllocateIdsRequest{\n\t\tModelKey: keyToProto(\"\", NewIncompleteKey(c, kind, parent)),\n\t\tSize:     proto.Int64(int64(n)),\n\t}\n\tres := &pb.AllocateIdsResponse{}\n\tif err := internal.Call(c, \"datastore_v3\", \"AllocateIds\", req, res); err != nil {\n\t\treturn 0, 0, err\n\t}\n\t// The protobuf is inclusive at both ends. Idiomatic Go (e.g. 
slices, for loops)\n\t// is inclusive at the low end and exclusive at the high end, so we add 1.\n\tlow = res.GetStart()\n\thigh = res.GetEnd() + 1\n\tif low+int64(n) != high {\n\t\treturn 0, 0, fmt.Errorf(\"datastore: internal error: could not allocate %d IDs\", n)\n\t}\n\treturn low, high, nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/datastore/load.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage datastore\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"google.golang.org/appengine\"\n\tpb \"google.golang.org/appengine/internal/datastore\"\n)\n\nvar (\n\ttypeOfBlobKey    = reflect.TypeOf(appengine.BlobKey(\"\"))\n\ttypeOfByteSlice  = reflect.TypeOf([]byte(nil))\n\ttypeOfByteString = reflect.TypeOf(ByteString(nil))\n\ttypeOfGeoPoint   = reflect.TypeOf(appengine.GeoPoint{})\n\ttypeOfTime       = reflect.TypeOf(time.Time{})\n)\n\n// typeMismatchReason returns a string explaining why the property p could not\n// be stored in an entity field of type v.Type().\nfunc typeMismatchReason(p Property, v reflect.Value) string {\n\tentityType := \"empty\"\n\tswitch p.Value.(type) {\n\tcase int64:\n\t\tentityType = \"int\"\n\tcase bool:\n\t\tentityType = \"bool\"\n\tcase string:\n\t\tentityType = \"string\"\n\tcase float64:\n\t\tentityType = \"float\"\n\tcase *Key:\n\t\tentityType = \"*datastore.Key\"\n\tcase time.Time:\n\t\tentityType = \"time.Time\"\n\tcase appengine.BlobKey:\n\t\tentityType = \"appengine.BlobKey\"\n\tcase appengine.GeoPoint:\n\t\tentityType = \"appengine.GeoPoint\"\n\tcase ByteString:\n\t\tentityType = \"datastore.ByteString\"\n\tcase []byte:\n\t\tentityType = \"[]byte\"\n\t}\n\treturn fmt.Sprintf(\"type mismatch: %s versus %v\", entityType, v.Type())\n}\n\ntype propertyLoader struct {\n\t// m holds the number of times a substruct field like \"Foo.Bar.Baz\" has\n\t// been seen so far. 
The map is constructed lazily.\n\tm map[string]int\n}\n\nfunc (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string {\n\tvar v reflect.Value\n\t// Traverse a struct's struct-typed fields.\n\tfor name := p.Name; ; {\n\t\tdecoder, ok := codec.byName[name]\n\t\tif !ok {\n\t\t\treturn \"no such struct field\"\n\t\t}\n\t\tv = structValue.Field(decoder.index)\n\t\tif !v.IsValid() {\n\t\t\treturn \"no such struct field\"\n\t\t}\n\t\tif !v.CanSet() {\n\t\t\treturn \"cannot set struct field\"\n\t\t}\n\n\t\tif decoder.substructCodec == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif v.Kind() == reflect.Slice {\n\t\t\tif l.m == nil {\n\t\t\t\tl.m = make(map[string]int)\n\t\t\t}\n\t\t\tindex := l.m[p.Name]\n\t\t\tl.m[p.Name] = index + 1\n\t\t\tfor v.Len() <= index {\n\t\t\t\tv.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))\n\t\t\t}\n\t\t\tstructValue = v.Index(index)\n\t\t\trequireSlice = false\n\t\t} else {\n\t\t\tstructValue = v\n\t\t}\n\t\t// Strip the \"I.\" from \"I.X\".\n\t\tname = name[len(codec.byIndex[decoder.index].name):]\n\t\tcodec = decoder.substructCodec\n\t}\n\n\tvar slice reflect.Value\n\tif v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {\n\t\tslice = v\n\t\tv = reflect.New(v.Type().Elem()).Elem()\n\t} else if requireSlice {\n\t\treturn \"multiple-valued property requires a slice field type\"\n\t}\n\n\t// Convert indexValues to a Go value with a meaning derived from the\n\t// destination type.\n\tpValue := p.Value\n\tif iv, ok := pValue.(indexValue); ok {\n\t\tmeaning := pb.Property_NO_MEANING\n\t\tswitch v.Type() {\n\t\tcase typeOfBlobKey:\n\t\t\tmeaning = pb.Property_BLOBKEY\n\t\tcase typeOfByteSlice:\n\t\t\tmeaning = pb.Property_BLOB\n\t\tcase typeOfByteString:\n\t\t\tmeaning = pb.Property_BYTESTRING\n\t\tcase typeOfGeoPoint:\n\t\t\tmeaning = pb.Property_GEORSS_POINT\n\t\tcase typeOfTime:\n\t\t\tmeaning = pb.Property_GD_WHEN\n\t\t}\n\t\tvar err error\n\t\tpValue, err = 
propValue(iv.value, meaning)\n\t\tif err != nil {\n\t\t\treturn err.Error()\n\t\t}\n\t}\n\n\tswitch v.Kind() {\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\tx, ok := pValue.(int64)\n\t\tif !ok && pValue != nil {\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\t\tif v.OverflowInt(x) {\n\t\t\treturn fmt.Sprintf(\"value %v overflows struct field of type %v\", x, v.Type())\n\t\t}\n\t\tv.SetInt(x)\n\tcase reflect.Bool:\n\t\tx, ok := pValue.(bool)\n\t\tif !ok && pValue != nil {\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\t\tv.SetBool(x)\n\tcase reflect.String:\n\t\tswitch x := pValue.(type) {\n\t\tcase appengine.BlobKey:\n\t\t\tv.SetString(string(x))\n\t\tcase ByteString:\n\t\t\tv.SetString(string(x))\n\t\tcase string:\n\t\t\tv.SetString(x)\n\t\tdefault:\n\t\t\tif pValue != nil {\n\t\t\t\treturn typeMismatchReason(p, v)\n\t\t\t}\n\t\t}\n\tcase reflect.Float32, reflect.Float64:\n\t\tx, ok := pValue.(float64)\n\t\tif !ok && pValue != nil {\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\t\tif v.OverflowFloat(x) {\n\t\t\treturn fmt.Sprintf(\"value %v overflows struct field of type %v\", x, v.Type())\n\t\t}\n\t\tv.SetFloat(x)\n\tcase reflect.Ptr:\n\t\tx, ok := pValue.(*Key)\n\t\tif !ok && pValue != nil {\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\t\tif _, ok := v.Interface().(*Key); !ok {\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\t\tv.Set(reflect.ValueOf(x))\n\tcase reflect.Struct:\n\t\tswitch v.Type() {\n\t\tcase typeOfTime:\n\t\t\tx, ok := pValue.(time.Time)\n\t\t\tif !ok && pValue != nil {\n\t\t\t\treturn typeMismatchReason(p, v)\n\t\t\t}\n\t\t\tv.Set(reflect.ValueOf(x))\n\t\tcase typeOfGeoPoint:\n\t\t\tx, ok := pValue.(appengine.GeoPoint)\n\t\t\tif !ok && pValue != nil {\n\t\t\t\treturn typeMismatchReason(p, v)\n\t\t\t}\n\t\t\tv.Set(reflect.ValueOf(x))\n\t\tdefault:\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\tcase reflect.Slice:\n\t\tx, ok := pValue.([]byte)\n\t\tif !ok {\n\t\t\tif y, yok := pValue.(ByteString); 
yok {\n\t\t\t\tx, ok = []byte(y), true\n\t\t\t}\n\t\t}\n\t\tif !ok && pValue != nil {\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\t\tv.SetBytes(x)\n\tdefault:\n\t\treturn typeMismatchReason(p, v)\n\t}\n\tif slice.IsValid() {\n\t\tslice.Set(reflect.Append(slice, v))\n\t}\n\treturn \"\"\n}\n\n// loadEntity loads an EntityProto into PropertyLoadSaver or struct pointer.\nfunc loadEntity(dst interface{}, src *pb.EntityProto) (err error) {\n\tprops, err := protoToProperties(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif e, ok := dst.(PropertyLoadSaver); ok {\n\t\treturn e.Load(props)\n\t}\n\treturn LoadStruct(dst, props)\n}\n\nfunc (s structPLS) Load(props []Property) error {\n\tvar fieldName, reason string\n\tvar l propertyLoader\n\tfor _, p := range props {\n\t\tif errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != \"\" {\n\t\t\t// We don't return early, as we try to load as many properties as possible.\n\t\t\t// It is valid to load an entity into a struct that cannot fully represent it.\n\t\t\t// That case returns an error, but the caller is free to ignore it.\n\t\t\tfieldName, reason = p.Name, errStr\n\t\t}\n\t}\n\tif reason != \"\" {\n\t\treturn &ErrFieldMismatch{\n\t\t\tStructType: s.v.Type(),\n\t\t\tFieldName:  fieldName,\n\t\t\tReason:     reason,\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc protoToProperties(src *pb.EntityProto) ([]Property, error) {\n\tprops, rawProps := src.Property, src.RawProperty\n\tout := make([]Property, 0, len(props)+len(rawProps))\n\tfor {\n\t\tvar (\n\t\t\tx       *pb.Property\n\t\t\tnoIndex bool\n\t\t)\n\t\tif len(props) > 0 {\n\t\t\tx, props = props[0], props[1:]\n\t\t} else if len(rawProps) > 0 {\n\t\t\tx, rawProps = rawProps[0], rawProps[1:]\n\t\t\tnoIndex = true\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\n\t\tvar value interface{}\n\t\tif x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE {\n\t\t\tvalue = 
indexValue{x.Value}\n\t\t} else {\n\t\t\tvar err error\n\t\t\tvalue, err = propValue(x.Value, x.GetMeaning())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tout = append(out, Property{\n\t\t\tName:     x.GetName(),\n\t\t\tValue:    value,\n\t\t\tNoIndex:  noIndex,\n\t\t\tMultiple: x.GetMultiple(),\n\t\t})\n\t}\n\treturn out, nil\n}\n\n// propValue returns a Go value that combines the raw PropertyValue with a\n// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time.\nfunc propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) {\n\tswitch {\n\tcase v.Int64Value != nil:\n\t\tif m == pb.Property_GD_WHEN {\n\t\t\treturn fromUnixMicro(*v.Int64Value), nil\n\t\t} else {\n\t\t\treturn *v.Int64Value, nil\n\t\t}\n\tcase v.BooleanValue != nil:\n\t\treturn *v.BooleanValue, nil\n\tcase v.StringValue != nil:\n\t\tif m == pb.Property_BLOB {\n\t\t\treturn []byte(*v.StringValue), nil\n\t\t} else if m == pb.Property_BLOBKEY {\n\t\t\treturn appengine.BlobKey(*v.StringValue), nil\n\t\t} else if m == pb.Property_BYTESTRING {\n\t\t\treturn ByteString(*v.StringValue), nil\n\t\t} else {\n\t\t\treturn *v.StringValue, nil\n\t\t}\n\tcase v.DoubleValue != nil:\n\t\treturn *v.DoubleValue, nil\n\tcase v.Referencevalue != nil:\n\t\tkey, err := referenceValueToKey(v.Referencevalue)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn key, nil\n\tcase v.Pointvalue != nil:\n\t\t// NOTE: Strangely, latitude maps to X, longitude to Y.\n\t\treturn appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil\n\t}\n\treturn nil, nil\n}\n\n// indexValue is a Property value that is created when entities are loaded from\n// an index, such as from a projection query.\n//\n// Such Property values do not contain all of the metadata required to be\n// faithfully represented as a Go value, and are instead represented as an\n// opaque indexValue. Load the properties into a concrete struct type (e.g. 
by\n// passing a struct pointer to Iterator.Next) to reconstruct actual Go values\n// of type int, string, time.Time, etc.\ntype indexValue struct {\n\tvalue *pb.PropertyValue\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/datastore/metadata.go",
    "content": "// Copyright 2016 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage datastore\n\nimport \"golang.org/x/net/context\"\n\n// Datastore kinds for the metadata entities.\nconst (\n\tnamespaceKind   = \"__namespace__\"\n\tkindKind        = \"__kind__\"\n\tpropertyKind    = \"__property__\"\n\tentityGroupKind = \"__entitygroup__\"\n)\n\n// Namespaces returns all the datastore namespaces.\nfunc Namespaces(ctx context.Context) ([]string, error) {\n\t// TODO(djd): Support range queries.\n\tq := NewQuery(namespaceKind).KeysOnly()\n\tkeys, err := q.GetAll(ctx, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// The empty namespace key uses a numeric ID (==1), but luckily\n\t// the string ID defaults to \"\" for numeric IDs anyway.\n\treturn keyNames(keys), nil\n}\n\n// Kinds returns the names of all the kinds in the current namespace.\nfunc Kinds(ctx context.Context) ([]string, error) {\n\t// TODO(djd): Support range queries.\n\tq := NewQuery(kindKind).KeysOnly()\n\tkeys, err := q.GetAll(ctx, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn keyNames(keys), nil\n}\n\n// keyNames returns a slice of the provided keys' names (string IDs).\nfunc keyNames(keys []*Key) []string {\n\tn := make([]string, 0, len(keys))\n\tfor _, k := range keys {\n\t\tn = append(n, k.StringID())\n\t}\n\treturn n\n}\n\n// KindProperties returns all the indexed properties for the given kind.\n// The properties are returned as a map of property names to a slice of the\n// representation types. 
The representation types for the supported Go property\n// types are:\n//   \"INT64\":     signed integers and time.Time\n//   \"DOUBLE\":    float32 and float64\n//   \"BOOLEAN\":   bool\n//   \"STRING\":    string, []byte and ByteString\n//   \"POINT\":     appengine.GeoPoint\n//   \"REFERENCE\": *Key\n//   \"USER\":      (not used in the Go runtime)\nfunc KindProperties(ctx context.Context, kind string) (map[string][]string, error) {\n\t// TODO(djd): Support range queries.\n\tkindKey := NewKey(ctx, kindKind, kind, 0, nil)\n\tq := NewQuery(propertyKind).Ancestor(kindKey)\n\n\tpropMap := map[string][]string{}\n\tprops := []struct {\n\t\tRepr []string `datastore:\"property_representation\"`\n\t}{}\n\n\tkeys, err := q.GetAll(ctx, &props)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i, p := range props {\n\t\tpropMap[keys[i].StringID()] = p.Repr\n\t}\n\treturn propMap, nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/datastore/prop.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage datastore\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n\t\"unicode\"\n)\n\n// Entities with more than this many indexed properties will not be saved.\nconst maxIndexedProperties = 20000\n\n// []byte fields more than 1 megabyte long will not be loaded or saved.\nconst maxBlobLen = 1 << 20\n\n// Property is a name/value pair plus some metadata. A datastore entity's\n// contents are loaded and saved as a sequence of Properties. An entity can\n// have multiple Properties with the same name, provided that p.Multiple is\n// true on all of that entity's Properties with that name.\ntype Property struct {\n\t// Name is the property name.\n\tName string\n\t// Value is the property value. The valid types are:\n\t//\t- int64\n\t//\t- bool\n\t//\t- string\n\t//\t- float64\n\t//\t- ByteString\n\t//\t- *Key\n\t//\t- time.Time\n\t//\t- appengine.BlobKey\n\t//\t- appengine.GeoPoint\n\t//\t- []byte (up to 1 megabyte in length)\n\t// This set is smaller than the set of valid struct field types that the\n\t// datastore can load and save. A Property Value cannot be a slice (apart\n\t// from []byte); use multiple Properties instead. Also, a Value's type\n\t// must be explicitly on the list above; it is not sufficient for the\n\t// underlying type to be on that list. For example, a Value of \"type\n\t// myInt64 int64\" is invalid. Smaller-width integers and floats are also\n\t// invalid. Again, this is more restrictive than the set of valid struct\n\t// field types.\n\t//\n\t// A Value will have an opaque type when loading entities from an index,\n\t// such as via a projection query. 
Load entities into a struct instead\n\t// of a PropertyLoadSaver when using a projection query.\n\t//\n\t// A Value may also be the nil interface value; this is equivalent to\n\t// Python's None but not directly representable by a Go struct. Loading\n\t// a nil-valued property into a struct will set that field to the zero\n\t// value.\n\tValue interface{}\n\t// NoIndex is whether the datastore cannot index this property.\n\tNoIndex bool\n\t// Multiple is whether the entity can have multiple properties with\n\t// the same name. Even if a particular instance only has one property with\n\t// a certain name, Multiple should be true if a struct would best represent\n\t// it as a field of type []T instead of type T.\n\tMultiple bool\n}\n\n// ByteString is a short byte slice (up to 1500 bytes) that can be indexed.\ntype ByteString []byte\n\n// PropertyLoadSaver can be converted from and to a slice of Properties.\ntype PropertyLoadSaver interface {\n\tLoad([]Property) error\n\tSave() ([]Property, error)\n}\n\n// PropertyList converts a []Property to implement PropertyLoadSaver.\ntype PropertyList []Property\n\nvar (\n\ttypeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()\n\ttypeOfPropertyList      = reflect.TypeOf(PropertyList(nil))\n)\n\n// Load loads all of the provided properties into l.\n// It does not first reset *l to an empty slice.\nfunc (l *PropertyList) Load(p []Property) error {\n\t*l = append(*l, p...)\n\treturn nil\n}\n\n// Save saves all of l's properties as a slice or Properties.\nfunc (l *PropertyList) Save() ([]Property, error) {\n\treturn *l, nil\n}\n\n// validPropertyName returns whether name consists of one or more valid Go\n// identifiers joined by \".\".\nfunc validPropertyName(name string) bool {\n\tif name == \"\" {\n\t\treturn false\n\t}\n\tfor _, s := range strings.Split(name, \".\") {\n\t\tif s == \"\" {\n\t\t\treturn false\n\t\t}\n\t\tfirst := true\n\t\tfor _, c := range s {\n\t\t\tif first {\n\t\t\t\tfirst = 
false\n\t\t\t\tif c != '_' && !unicode.IsLetter(c) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\n// structTag is the parsed `datastore:\"name,options\"` tag of a struct field.\n// If a field has no tag, or the tag has an empty name, then the structTag's\n// name is just the field name. A \"-\" name means that the datastore ignores\n// that field.\ntype structTag struct {\n\tname    string\n\tnoIndex bool\n}\n\n// structCodec describes how to convert a struct to and from a sequence of\n// properties.\ntype structCodec struct {\n\t// byIndex gives the structTag for the i'th field.\n\tbyIndex []structTag\n\t// byName gives the field codec for the structTag with the given name.\n\tbyName map[string]fieldCodec\n\t// hasSlice is whether a struct or any of its nested or embedded structs\n\t// has a slice-typed field (other than []byte).\n\thasSlice bool\n\t// complete is whether the structCodec is complete. An incomplete\n\t// structCodec may be encountered when walking a recursive struct.\n\tcomplete bool\n}\n\n// fieldCodec is a struct field's index and, if that struct field's type is\n// itself a struct, that substruct's structCodec.\ntype fieldCodec struct {\n\tindex          int\n\tsubstructCodec *structCodec\n}\n\n// structCodecs collects the structCodecs that have already been calculated.\nvar (\n\tstructCodecsMutex sync.Mutex\n\tstructCodecs      = make(map[reflect.Type]*structCodec)\n)\n\n// getStructCodec returns the structCodec for the given struct type.\nfunc getStructCodec(t reflect.Type) (*structCodec, error) {\n\tstructCodecsMutex.Lock()\n\tdefer structCodecsMutex.Unlock()\n\treturn getStructCodecLocked(t)\n}\n\n// getStructCodecLocked implements getStructCodec. 
The structCodecsMutex must\n// be held when calling this function.\nfunc getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) {\n\tc, ok := structCodecs[t]\n\tif ok {\n\t\treturn c, nil\n\t}\n\tc = &structCodec{\n\t\tbyIndex: make([]structTag, t.NumField()),\n\t\tbyName:  make(map[string]fieldCodec),\n\t}\n\n\t// Add c to the structCodecs map before we are sure it is good. If t is\n\t// a recursive type, it needs to find the incomplete entry for itself in\n\t// the map.\n\tstructCodecs[t] = c\n\tdefer func() {\n\t\tif retErr != nil {\n\t\t\tdelete(structCodecs, t)\n\t\t}\n\t}()\n\n\tfor i := range c.byIndex {\n\t\tf := t.Field(i)\n\t\tname, opts := f.Tag.Get(\"datastore\"), \"\"\n\t\tif i := strings.Index(name, \",\"); i != -1 {\n\t\t\tname, opts = name[:i], name[i+1:]\n\t\t}\n\t\tif name == \"\" {\n\t\t\tif !f.Anonymous {\n\t\t\t\tname = f.Name\n\t\t\t}\n\t\t} else if name == \"-\" {\n\t\t\tc.byIndex[i] = structTag{name: name}\n\t\t\tcontinue\n\t\t} else if !validPropertyName(name) {\n\t\t\treturn nil, fmt.Errorf(\"datastore: struct tag has invalid property name: %q\", name)\n\t\t}\n\n\t\tsubstructType, fIsSlice := reflect.Type(nil), false\n\t\tswitch f.Type.Kind() {\n\t\tcase reflect.Struct:\n\t\t\tsubstructType = f.Type\n\t\tcase reflect.Slice:\n\t\t\tif f.Type.Elem().Kind() == reflect.Struct {\n\t\t\t\tsubstructType = f.Type.Elem()\n\t\t\t}\n\t\t\tfIsSlice = f.Type != typeOfByteSlice\n\t\t\tc.hasSlice = c.hasSlice || fIsSlice\n\t\t}\n\n\t\tif substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint {\n\t\t\tif name != \"\" {\n\t\t\t\tname = name + \".\"\n\t\t\t}\n\t\t\tsub, err := getStructCodecLocked(substructType)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif !sub.complete {\n\t\t\t\treturn nil, fmt.Errorf(\"datastore: recursive struct: field %q\", f.Name)\n\t\t\t}\n\t\t\tif fIsSlice && sub.hasSlice {\n\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\"datastore: flattening nested structs leads to 
a slice of slices: field %q\", f.Name)\n\t\t\t}\n\t\t\tc.hasSlice = c.hasSlice || sub.hasSlice\n\t\t\tfor relName := range sub.byName {\n\t\t\t\tabsName := name + relName\n\t\t\t\tif _, ok := c.byName[absName]; ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\"datastore: struct tag has repeated property name: %q\", absName)\n\t\t\t\t}\n\t\t\t\tc.byName[absName] = fieldCodec{index: i, substructCodec: sub}\n\t\t\t}\n\t\t} else {\n\t\t\tif _, ok := c.byName[name]; ok {\n\t\t\t\treturn nil, fmt.Errorf(\"datastore: struct tag has repeated property name: %q\", name)\n\t\t\t}\n\t\t\tc.byName[name] = fieldCodec{index: i}\n\t\t}\n\n\t\tc.byIndex[i] = structTag{\n\t\t\tname:    name,\n\t\t\tnoIndex: opts == \"noindex\",\n\t\t}\n\t}\n\tc.complete = true\n\treturn c, nil\n}\n\n// structPLS adapts a struct to be a PropertyLoadSaver.\ntype structPLS struct {\n\tv     reflect.Value\n\tcodec *structCodec\n}\n\n// newStructPLS returns a PropertyLoadSaver for the struct pointer p.\nfunc newStructPLS(p interface{}) (PropertyLoadSaver, error) {\n\tv := reflect.ValueOf(p)\n\tif v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {\n\t\treturn nil, ErrInvalidEntityType\n\t}\n\tv = v.Elem()\n\tcodec, err := getStructCodec(v.Type())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn structPLS{v, codec}, nil\n}\n\n// LoadStruct loads the properties from p to dst.\n// dst must be a struct pointer.\nfunc LoadStruct(dst interface{}, p []Property) error {\n\tx, err := newStructPLS(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn x.Load(p)\n}\n\n// SaveStruct returns the properties from src as a slice of Properties.\n// src must be a struct pointer.\nfunc SaveStruct(src interface{}) ([]Property, error) {\n\tx, err := newStructPLS(src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn x.Save()\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/datastore/query.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage datastore\n\nimport (\n\t\"encoding/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/datastore\"\n)\n\ntype operator int\n\nconst (\n\tlessThan operator = iota\n\tlessEq\n\tequal\n\tgreaterEq\n\tgreaterThan\n)\n\nvar operatorToProto = map[operator]*pb.Query_Filter_Operator{\n\tlessThan:    pb.Query_Filter_LESS_THAN.Enum(),\n\tlessEq:      pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(),\n\tequal:       pb.Query_Filter_EQUAL.Enum(),\n\tgreaterEq:   pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(),\n\tgreaterThan: pb.Query_Filter_GREATER_THAN.Enum(),\n}\n\n// filter is a conditional filter on query results.\ntype filter struct {\n\tFieldName string\n\tOp        operator\n\tValue     interface{}\n}\n\ntype sortDirection int\n\nconst (\n\tascending sortDirection = iota\n\tdescending\n)\n\nvar sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{\n\tascending:  pb.Query_Order_ASCENDING.Enum(),\n\tdescending: pb.Query_Order_DESCENDING.Enum(),\n}\n\n// order is a sort order on query results.\ntype order struct {\n\tFieldName string\n\tDirection sortDirection\n}\n\n// NewQuery creates a new Query for a specific entity kind.\n//\n// An empty kind means to return all entities, including entities created and\n// managed by other App Engine features, and is called a kindless query.\n// Kindless queries cannot include filters or sort orders on property values.\nfunc NewQuery(kind string) *Query {\n\treturn &Query{\n\t\tkind:  kind,\n\t\tlimit: -1,\n\t}\n}\n\n// Query represents a datastore query.\ntype Query struct {\n\tkind       string\n\tancestor   *Key\n\tfilter     []filter\n\torder      
[]order\n\tprojection []string\n\n\tdistinct bool\n\tkeysOnly bool\n\teventual bool\n\tlimit    int32\n\toffset   int32\n\tstart    *pb.CompiledCursor\n\tend      *pb.CompiledCursor\n\n\terr error\n}\n\nfunc (q *Query) clone() *Query {\n\tx := *q\n\t// Copy the contents of the slice-typed fields to a new backing store.\n\tif len(q.filter) > 0 {\n\t\tx.filter = make([]filter, len(q.filter))\n\t\tcopy(x.filter, q.filter)\n\t}\n\tif len(q.order) > 0 {\n\t\tx.order = make([]order, len(q.order))\n\t\tcopy(x.order, q.order)\n\t}\n\treturn &x\n}\n\n// Ancestor returns a derivative query with an ancestor filter.\n// The ancestor should not be nil.\nfunc (q *Query) Ancestor(ancestor *Key) *Query {\n\tq = q.clone()\n\tif ancestor == nil {\n\t\tq.err = errors.New(\"datastore: nil query ancestor\")\n\t\treturn q\n\t}\n\tq.ancestor = ancestor\n\treturn q\n}\n\n// EventualConsistency returns a derivative query that returns eventually\n// consistent results.\n// It only has an effect on ancestor queries.\nfunc (q *Query) EventualConsistency() *Query {\n\tq = q.clone()\n\tq.eventual = true\n\treturn q\n}\n\n// Filter returns a derivative query with a field-based filter.\n// The filterStr argument must be a field name followed by optional space,\n// followed by an operator, one of \">\", \"<\", \">=\", \"<=\", or \"=\".\n// Fields are compared against the provided value using the operator.\n// Multiple filters are AND'ed together.\nfunc (q *Query) Filter(filterStr string, value interface{}) *Query {\n\tq = q.clone()\n\tfilterStr = strings.TrimSpace(filterStr)\n\tif len(filterStr) < 1 {\n\t\tq.err = errors.New(\"datastore: invalid filter: \" + filterStr)\n\t\treturn q\n\t}\n\tf := filter{\n\t\tFieldName: strings.TrimRight(filterStr, \" ><=!\"),\n\t\tValue:     value,\n\t}\n\tswitch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {\n\tcase \"<=\":\n\t\tf.Op = lessEq\n\tcase \">=\":\n\t\tf.Op = greaterEq\n\tcase \"<\":\n\t\tf.Op = lessThan\n\tcase \">\":\n\t\tf.Op = 
greaterThan\n\tcase \"=\":\n\t\tf.Op = equal\n\tdefault:\n\t\tq.err = fmt.Errorf(\"datastore: invalid operator %q in filter %q\", op, filterStr)\n\t\treturn q\n\t}\n\tq.filter = append(q.filter, f)\n\treturn q\n}\n\n// Order returns a derivative query with a field-based sort order. Orders are\n// applied in the order they are added. The default order is ascending; to sort\n// in descending order prefix the fieldName with a minus sign (-).\nfunc (q *Query) Order(fieldName string) *Query {\n\tq = q.clone()\n\tfieldName = strings.TrimSpace(fieldName)\n\to := order{\n\t\tDirection: ascending,\n\t\tFieldName: fieldName,\n\t}\n\tif strings.HasPrefix(fieldName, \"-\") {\n\t\to.Direction = descending\n\t\to.FieldName = strings.TrimSpace(fieldName[1:])\n\t} else if strings.HasPrefix(fieldName, \"+\") {\n\t\tq.err = fmt.Errorf(\"datastore: invalid order: %q\", fieldName)\n\t\treturn q\n\t}\n\tif len(o.FieldName) == 0 {\n\t\tq.err = errors.New(\"datastore: empty order\")\n\t\treturn q\n\t}\n\tq.order = append(q.order, o)\n\treturn q\n}\n\n// Project returns a derivative query that yields only the given fields. It\n// cannot be used with KeysOnly.\nfunc (q *Query) Project(fieldNames ...string) *Query {\n\tq = q.clone()\n\tq.projection = append([]string(nil), fieldNames...)\n\treturn q\n}\n\n// Distinct returns a derivative query that yields de-duplicated entities with\n// respect to the set of projected fields. It is only used for projection\n// queries.\nfunc (q *Query) Distinct() *Query {\n\tq = q.clone()\n\tq.distinct = true\n\treturn q\n}\n\n// KeysOnly returns a derivative query that yields only keys, not keys and\n// entities. It cannot be used with projection queries.\nfunc (q *Query) KeysOnly() *Query {\n\tq = q.clone()\n\tq.keysOnly = true\n\treturn q\n}\n\n// Limit returns a derivative query that has a limit on the number of results\n// returned. 
A negative value means unlimited.\nfunc (q *Query) Limit(limit int) *Query {\n\tq = q.clone()\n\tif limit < math.MinInt32 || limit > math.MaxInt32 {\n\t\tq.err = errors.New(\"datastore: query limit overflow\")\n\t\treturn q\n\t}\n\tq.limit = int32(limit)\n\treturn q\n}\n\n// Offset returns a derivative query that has an offset of how many keys to\n// skip over before returning results. A negative value is invalid.\nfunc (q *Query) Offset(offset int) *Query {\n\tq = q.clone()\n\tif offset < 0 {\n\t\tq.err = errors.New(\"datastore: negative query offset\")\n\t\treturn q\n\t}\n\tif offset > math.MaxInt32 {\n\t\tq.err = errors.New(\"datastore: query offset overflow\")\n\t\treturn q\n\t}\n\tq.offset = int32(offset)\n\treturn q\n}\n\n// Start returns a derivative query with the given start point.\nfunc (q *Query) Start(c Cursor) *Query {\n\tq = q.clone()\n\tif c.cc == nil {\n\t\tq.err = errors.New(\"datastore: invalid cursor\")\n\t\treturn q\n\t}\n\tq.start = c.cc\n\treturn q\n}\n\n// End returns a derivative query with the given end point.\nfunc (q *Query) End(c Cursor) *Query {\n\tq = q.clone()\n\tif c.cc == nil {\n\t\tq.err = errors.New(\"datastore: invalid cursor\")\n\t\treturn q\n\t}\n\tq.end = c.cc\n\treturn q\n}\n\n// toProto converts the query to a protocol buffer.\nfunc (q *Query) toProto(dst *pb.Query, appID string) error {\n\tif len(q.projection) != 0 && q.keysOnly {\n\t\treturn errors.New(\"datastore: query cannot both project and be keys-only\")\n\t}\n\tdst.Reset()\n\tdst.App = proto.String(appID)\n\tif q.kind != \"\" {\n\t\tdst.Kind = proto.String(q.kind)\n\t}\n\tif q.ancestor != nil {\n\t\tdst.Ancestor = keyToProto(appID, q.ancestor)\n\t\tif q.eventual {\n\t\t\tdst.Strong = proto.Bool(false)\n\t\t}\n\t}\n\tif q.projection != nil {\n\t\tdst.PropertyName = q.projection\n\t\tif q.distinct {\n\t\t\tdst.GroupByPropertyName = q.projection\n\t\t}\n\t}\n\tif q.keysOnly {\n\t\tdst.KeysOnly = proto.Bool(true)\n\t\tdst.RequirePerfectPlan = 
proto.Bool(true)\n\t}\n\tfor _, qf := range q.filter {\n\t\tif qf.FieldName == \"\" {\n\t\t\treturn errors.New(\"datastore: empty query filter field name\")\n\t\t}\n\t\tp, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false)\n\t\tif errStr != \"\" {\n\t\t\treturn errors.New(\"datastore: bad query filter value type: \" + errStr)\n\t\t}\n\t\txf := &pb.Query_Filter{\n\t\t\tOp:       operatorToProto[qf.Op],\n\t\t\tProperty: []*pb.Property{p},\n\t\t}\n\t\tif xf.Op == nil {\n\t\t\treturn errors.New(\"datastore: unknown query filter operator\")\n\t\t}\n\t\tdst.Filter = append(dst.Filter, xf)\n\t}\n\tfor _, qo := range q.order {\n\t\tif qo.FieldName == \"\" {\n\t\t\treturn errors.New(\"datastore: empty query order field name\")\n\t\t}\n\t\txo := &pb.Query_Order{\n\t\t\tProperty:  proto.String(qo.FieldName),\n\t\t\tDirection: sortDirectionToProto[qo.Direction],\n\t\t}\n\t\tif xo.Direction == nil {\n\t\t\treturn errors.New(\"datastore: unknown query order direction\")\n\t\t}\n\t\tdst.Order = append(dst.Order, xo)\n\t}\n\tif q.limit >= 0 {\n\t\tdst.Limit = proto.Int32(q.limit)\n\t}\n\tif q.offset != 0 {\n\t\tdst.Offset = proto.Int32(q.offset)\n\t}\n\tdst.CompiledCursor = q.start\n\tdst.EndCompiledCursor = q.end\n\tdst.Compile = proto.Bool(true)\n\treturn nil\n}\n\n// Count returns the number of results for the query.\nfunc (q *Query) Count(c context.Context) (int, error) {\n\t// Check that the query is well-formed.\n\tif q.err != nil {\n\t\treturn 0, q.err\n\t}\n\n\t// Run a copy of the query, with keysOnly true (if we're not a projection,\n\t// since the two are incompatible), and an adjusted offset. 
We also set the\n\t// limit to zero, as we don't want any actual entity data, just the number\n\t// of skipped results.\n\tnewQ := q.clone()\n\tnewQ.keysOnly = len(newQ.projection) == 0\n\tnewQ.limit = 0\n\tif q.limit < 0 {\n\t\t// If the original query was unlimited, set the new query's offset to maximum.\n\t\tnewQ.offset = math.MaxInt32\n\t} else {\n\t\tnewQ.offset = q.offset + q.limit\n\t\tif newQ.offset < 0 {\n\t\t\t// Do the best we can, in the presence of overflow.\n\t\t\tnewQ.offset = math.MaxInt32\n\t\t}\n\t}\n\treq := &pb.Query{}\n\tif err := newQ.toProto(req, internal.FullyQualifiedAppID(c)); err != nil {\n\t\treturn 0, err\n\t}\n\tres := &pb.QueryResult{}\n\tif err := internal.Call(c, \"datastore_v3\", \"RunQuery\", req, res); err != nil {\n\t\treturn 0, err\n\t}\n\n\t// n is the count we will return. For example, suppose that our original\n\t// query had an offset of 4 and a limit of 2008: the count will be 2008,\n\t// provided that there are at least 2012 matching entities. However, the\n\t// RPCs will only skip 1000 results at a time. 
The RPC sequence is:\n\t//   call RunQuery with (offset, limit) = (2012, 0)  // 2012 == newQ.offset\n\t//   response has (skippedResults, moreResults) = (1000, true)\n\t//   n += 1000  // n == 1000\n\t//   call Next     with (offset, limit) = (1012, 0)  // 1012 == newQ.offset - n\n\t//   response has (skippedResults, moreResults) = (1000, true)\n\t//   n += 1000  // n == 2000\n\t//   call Next     with (offset, limit) = (12, 0)    // 12 == newQ.offset - n\n\t//   response has (skippedResults, moreResults) = (12, false)\n\t//   n += 12    // n == 2012\n\t//   // exit the loop\n\t//   n -= 4     // n == 2008\n\tvar n int32\n\tfor {\n\t\t// The QueryResult should have no actual entity data, just skipped results.\n\t\tif len(res.Result) != 0 {\n\t\t\treturn 0, errors.New(\"datastore: internal error: Count request returned too much data\")\n\t\t}\n\t\tn += res.GetSkippedResults()\n\t\tif !res.GetMoreResults() {\n\t\t\tbreak\n\t\t}\n\t\tif err := callNext(c, res, newQ.offset-n, 0); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\tn -= q.offset\n\tif n < 0 {\n\t\t// If the offset was greater than the number of matching entities,\n\t\t// return 0 instead of negative.\n\t\tn = 0\n\t}\n\treturn int(n), nil\n}\n\n// callNext issues a datastore_v3/Next RPC to advance a cursor, such as that\n// returned by a query with more results.\nfunc callNext(c context.Context, res *pb.QueryResult, offset, limit int32) error {\n\tif res.Cursor == nil {\n\t\treturn errors.New(\"datastore: internal error: server did not return a cursor\")\n\t}\n\treq := &pb.NextRequest{\n\t\tCursor: res.Cursor,\n\t}\n\tif limit >= 0 {\n\t\treq.Count = proto.Int32(limit)\n\t}\n\tif offset != 0 {\n\t\treq.Offset = proto.Int32(offset)\n\t}\n\tif res.CompiledCursor != nil {\n\t\treq.Compile = proto.Bool(true)\n\t}\n\tres.Reset()\n\treturn internal.Call(c, \"datastore_v3\", \"Next\", req, res)\n}\n\n// GetAll runs the query in the given context and returns all keys that match\n// that query, as well as appending 
the values to dst.\n//\n// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-\n// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.\n//\n// As a special case, *PropertyList is an invalid type for dst, even though a\n// PropertyList is a slice of structs. It is treated as invalid to avoid being\n// mistakenly passed when *[]PropertyList was intended.\n//\n// The keys returned by GetAll will be in a 1-1 correspondence with the entities\n// added to dst.\n//\n// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.\nfunc (q *Query) GetAll(c context.Context, dst interface{}) ([]*Key, error) {\n\tvar (\n\t\tdv               reflect.Value\n\t\tmat              multiArgType\n\t\telemType         reflect.Type\n\t\terrFieldMismatch error\n\t)\n\tif !q.keysOnly {\n\t\tdv = reflect.ValueOf(dst)\n\t\tif dv.Kind() != reflect.Ptr || dv.IsNil() {\n\t\t\treturn nil, ErrInvalidEntityType\n\t\t}\n\t\tdv = dv.Elem()\n\t\tmat, elemType = checkMultiArg(dv)\n\t\tif mat == multiArgTypeInvalid || mat == multiArgTypeInterface {\n\t\t\treturn nil, ErrInvalidEntityType\n\t\t}\n\t}\n\n\tvar keys []*Key\n\tfor t := q.Run(c); ; {\n\t\tk, e, err := t.next()\n\t\tif err == Done {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn keys, err\n\t\t}\n\t\tif !q.keysOnly {\n\t\t\tev := reflect.New(elemType)\n\t\t\tif elemType.Kind() == reflect.Map {\n\t\t\t\t// This is a special case. The zero values of a map type are\n\t\t\t\t// not immediately useful; they have to be make'd.\n\t\t\t\t//\n\t\t\t\t// Funcs and channels are similar, in that a zero value is not useful,\n\t\t\t\t// but even a freshly make'd channel isn't useful: there's no fixed\n\t\t\t\t// channel buffer size that is always going to be large enough, and\n\t\t\t\t// there's no goroutine to drain the other end. 
Theoretically, these\n\t\t\t\t// types could be supported, for example by sniffing for a constructor\n\t\t\t\t// method or requiring prior registration, but for now it's not a\n\t\t\t\t// frequent enough concern to be worth it. Programmers can work around\n\t\t\t\t// it by explicitly using Iterator.Next instead of the Query.GetAll\n\t\t\t\t// convenience method.\n\t\t\t\tx := reflect.MakeMap(elemType)\n\t\t\t\tev.Elem().Set(x)\n\t\t\t}\n\t\t\tif err = loadEntity(ev.Interface(), e); err != nil {\n\t\t\t\tif _, ok := err.(*ErrFieldMismatch); ok {\n\t\t\t\t\t// We continue loading entities even in the face of field mismatch errors.\n\t\t\t\t\t// If we encounter any other error, that other error is returned. Otherwise,\n\t\t\t\t\t// an ErrFieldMismatch is returned.\n\t\t\t\t\terrFieldMismatch = err\n\t\t\t\t} else {\n\t\t\t\t\treturn keys, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif mat != multiArgTypeStructPtr {\n\t\t\t\tev = ev.Elem()\n\t\t\t}\n\t\t\tdv.Set(reflect.Append(dv, ev))\n\t\t}\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys, errFieldMismatch\n}\n\n// Run runs the query in the given context.\nfunc (q *Query) Run(c context.Context) *Iterator {\n\tif q.err != nil {\n\t\treturn &Iterator{err: q.err}\n\t}\n\tt := &Iterator{\n\t\tc:      c,\n\t\tlimit:  q.limit,\n\t\tq:      q,\n\t\tprevCC: q.start,\n\t}\n\tvar req pb.Query\n\tif err := q.toProto(&req, internal.FullyQualifiedAppID(c)); err != nil {\n\t\tt.err = err\n\t\treturn t\n\t}\n\tif err := internal.Call(c, \"datastore_v3\", \"RunQuery\", &req, &t.res); err != nil {\n\t\tt.err = err\n\t\treturn t\n\t}\n\toffset := q.offset - t.res.GetSkippedResults()\n\tfor offset > 0 && t.res.GetMoreResults() {\n\t\tt.prevCC = t.res.CompiledCursor\n\t\tif err := callNext(t.c, &t.res, offset, t.limit); err != nil {\n\t\t\tt.err = err\n\t\t\tbreak\n\t\t}\n\t\tskip := t.res.GetSkippedResults()\n\t\tif skip < 0 {\n\t\t\tt.err = errors.New(\"datastore: internal error: negative number of skipped_results\")\n\t\t\tbreak\n\t\t}\n\t\toffset 
-= skip\n\t}\n\tif offset < 0 {\n\t\tt.err = errors.New(\"datastore: internal error: query offset was overshot\")\n\t}\n\treturn t\n}\n\n// Iterator is the result of running a query.\ntype Iterator struct {\n\tc   context.Context\n\terr error\n\t// res is the result of the most recent RunQuery or Next API call.\n\tres pb.QueryResult\n\t// i is how many elements of res.Result we have iterated over.\n\ti int\n\t// limit is the limit on the number of results this iterator should return.\n\t// A negative value means unlimited.\n\tlimit int32\n\t// q is the original query which yielded this iterator.\n\tq *Query\n\t// prevCC is the compiled cursor that marks the end of the previous batch\n\t// of results.\n\tprevCC *pb.CompiledCursor\n}\n\n// Done is returned when a query iteration has completed.\nvar Done = errors.New(\"datastore: query has no more results\")\n\n// Next returns the key of the next result. When there are no more results,\n// Done is returned as the error.\n//\n// If the query is not keys only and dst is non-nil, it also loads the entity\n// stored for that key into the struct pointer or PropertyLoadSaver dst, with\n// the same semantics and possible errors as for the Get function.\nfunc (t *Iterator) Next(dst interface{}) (*Key, error) {\n\tk, e, err := t.next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif dst != nil && !t.q.keysOnly {\n\t\terr = loadEntity(dst, e)\n\t}\n\treturn k, err\n}\n\nfunc (t *Iterator) next() (*Key, *pb.EntityProto, error) {\n\tif t.err != nil {\n\t\treturn nil, nil, t.err\n\t}\n\n\t// Issue datastore_v3/Next RPCs as necessary.\n\tfor t.i == len(t.res.Result) {\n\t\tif !t.res.GetMoreResults() {\n\t\t\tt.err = Done\n\t\t\treturn nil, nil, t.err\n\t\t}\n\t\tt.prevCC = t.res.CompiledCursor\n\t\tif err := callNext(t.c, &t.res, 0, t.limit); err != nil {\n\t\t\tt.err = err\n\t\t\treturn nil, nil, t.err\n\t\t}\n\t\tif t.res.GetSkippedResults() != 0 {\n\t\t\tt.err = errors.New(\"datastore: internal error: iterator has skipped 
results\")\n\t\t\treturn nil, nil, t.err\n\t\t}\n\t\tt.i = 0\n\t\tif t.limit >= 0 {\n\t\t\tt.limit -= int32(len(t.res.Result))\n\t\t\tif t.limit < 0 {\n\t\t\t\tt.err = errors.New(\"datastore: internal error: query returned more results than the limit\")\n\t\t\t\treturn nil, nil, t.err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Extract the key from the t.i'th element of t.res.Result.\n\te := t.res.Result[t.i]\n\tt.i++\n\tif e.Key == nil {\n\t\treturn nil, nil, errors.New(\"datastore: internal error: server did not return a key\")\n\t}\n\tk, err := protoToKey(e.Key)\n\tif err != nil || k.Incomplete() {\n\t\treturn nil, nil, errors.New(\"datastore: internal error: server returned an invalid key\")\n\t}\n\treturn k, e, nil\n}\n\n// Cursor returns a cursor for the iterator's current location.\nfunc (t *Iterator) Cursor() (Cursor, error) {\n\tif t.err != nil && t.err != Done {\n\t\treturn Cursor{}, t.err\n\t}\n\t// If we are at either end of the current batch of results,\n\t// return the compiled cursor at that end.\n\tskipped := t.res.GetSkippedResults()\n\tif t.i == 0 && skipped == 0 {\n\t\tif t.prevCC == nil {\n\t\t\t// A nil pointer (of type *pb.CompiledCursor) means no constraint:\n\t\t\t// passing it as the end cursor of a new query means unlimited results\n\t\t\t// (glossing over the integer limit parameter for now).\n\t\t\t// A non-nil pointer to an empty pb.CompiledCursor means the start:\n\t\t\t// passing it as the end cursor of a new query means 0 results.\n\t\t\t// If prevCC was nil, then the original query had no start cursor, but\n\t\t\t// Iterator.Cursor should return \"the start\" instead of unlimited.\n\t\t\treturn Cursor{&zeroCC}, nil\n\t\t}\n\t\treturn Cursor{t.prevCC}, nil\n\t}\n\tif t.i == len(t.res.Result) {\n\t\treturn Cursor{t.res.CompiledCursor}, nil\n\t}\n\t// Otherwise, re-run the query offset to this iterator's position, starting from\n\t// the most recent compiled cursor. 
This is done on a best-effort basis, as it\n\t// is racy; if a concurrent process has added or removed entities, then the\n\t// cursor returned may be inconsistent.\n\tq := t.q.clone()\n\tq.start = t.prevCC\n\tq.offset = skipped + int32(t.i)\n\tq.limit = 0\n\tq.keysOnly = len(q.projection) == 0\n\tt1 := q.Run(t.c)\n\t_, _, err := t1.next()\n\tif err != Done {\n\t\tif err == nil {\n\t\t\terr = fmt.Errorf(\"datastore: internal error: zero-limit query did not have zero results\")\n\t\t}\n\t\treturn Cursor{}, err\n\t}\n\treturn Cursor{t1.res.CompiledCursor}, nil\n}\n\nvar zeroCC pb.CompiledCursor\n\n// Cursor is an iterator's position. It can be converted to and from an opaque\n// string. A cursor can be used from different HTTP requests, but only with a\n// query with the same kind, ancestor, filter and order constraints.\ntype Cursor struct {\n\tcc *pb.CompiledCursor\n}\n\n// String returns a base-64 string representation of a cursor.\nfunc (c Cursor) String() string {\n\tif c.cc == nil {\n\t\treturn \"\"\n\t}\n\tb, err := proto.Marshal(c.cc)\n\tif err != nil {\n\t\t// The only way to construct a Cursor with a non-nil cc field is to\n\t\t// unmarshal from the byte representation. We panic if the unmarshal\n\t\t// succeeds but the marshaling of the unchanged protobuf value fails.\n\t\tpanic(fmt.Sprintf(\"datastore: internal error: malformed cursor: %v\", err))\n\t}\n\treturn strings.TrimRight(base64.URLEncoding.EncodeToString(b), \"=\")\n}\n\n// Decode decodes a cursor from its base-64 string representation.\nfunc DecodeCursor(s string) (Cursor, error) {\n\tif s == \"\" {\n\t\treturn Cursor{&zeroCC}, nil\n\t}\n\tif n := len(s) % 4; n != 0 {\n\t\ts += strings.Repeat(\"=\", 4-n)\n\t}\n\tb, err := base64.URLEncoding.DecodeString(s)\n\tif err != nil {\n\t\treturn Cursor{}, err\n\t}\n\tcc := &pb.CompiledCursor{}\n\tif err := proto.Unmarshal(b, cc); err != nil {\n\t\treturn Cursor{}, err\n\t}\n\treturn Cursor{cc}, nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/datastore/save.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage datastore\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\t\"google.golang.org/appengine\"\n\tpb \"google.golang.org/appengine/internal/datastore\"\n)\n\nfunc toUnixMicro(t time.Time) int64 {\n\t// We cannot use t.UnixNano() / 1e3 because we want to handle times more than\n\t// 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot\n\t// be represented in the numerator of a single int64 divide.\n\treturn t.Unix()*1e6 + int64(t.Nanosecond()/1e3)\n}\n\nfunc fromUnixMicro(t int64) time.Time {\n\treturn time.Unix(t/1e6, (t%1e6)*1e3)\n}\n\nvar (\n\tminTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)\n\tmaxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)\n)\n\n// valueToProto converts a named value to a newly allocated Property.\n// The returned error string is empty on success.\nfunc valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) {\n\tvar (\n\t\tpv          pb.PropertyValue\n\t\tunsupported bool\n\t)\n\tswitch v.Kind() {\n\tcase reflect.Invalid:\n\t\t// No-op.\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\tpv.Int64Value = proto.Int64(v.Int())\n\tcase reflect.Bool:\n\t\tpv.BooleanValue = proto.Bool(v.Bool())\n\tcase reflect.String:\n\t\tpv.StringValue = proto.String(v.String())\n\tcase reflect.Float32, reflect.Float64:\n\t\tpv.DoubleValue = proto.Float64(v.Float())\n\tcase reflect.Ptr:\n\t\tif k, ok := v.Interface().(*Key); ok {\n\t\t\tif k != nil {\n\t\t\t\tpv.Referencevalue = keyToReferenceValue(defaultAppID, k)\n\t\t\t}\n\t\t} else {\n\t\t\tunsupported = true\n\t\t}\n\tcase reflect.Struct:\n\t\tswitch t := v.Interface().(type) {\n\t\tcase 
time.Time:\n\t\t\tif t.Before(minTime) || t.After(maxTime) {\n\t\t\t\treturn nil, \"time value out of range\"\n\t\t\t}\n\t\t\tpv.Int64Value = proto.Int64(toUnixMicro(t))\n\t\tcase appengine.GeoPoint:\n\t\t\tif !t.Valid() {\n\t\t\t\treturn nil, \"invalid GeoPoint value\"\n\t\t\t}\n\t\t\t// NOTE: Strangely, latitude maps to X, longitude to Y.\n\t\t\tpv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng}\n\t\tdefault:\n\t\t\tunsupported = true\n\t\t}\n\tcase reflect.Slice:\n\t\tif b, ok := v.Interface().([]byte); ok {\n\t\t\tpv.StringValue = proto.String(string(b))\n\t\t} else {\n\t\t\t// nvToProto should already catch slice values.\n\t\t\t// If we get here, we have a slice of slice values.\n\t\t\tunsupported = true\n\t\t}\n\tdefault:\n\t\tunsupported = true\n\t}\n\tif unsupported {\n\t\treturn nil, \"unsupported datastore value type: \" + v.Type().String()\n\t}\n\tp = &pb.Property{\n\t\tName:     proto.String(name),\n\t\tValue:    &pv,\n\t\tMultiple: proto.Bool(multiple),\n\t}\n\tif v.IsValid() {\n\t\tswitch v.Interface().(type) {\n\t\tcase []byte:\n\t\t\tp.Meaning = pb.Property_BLOB.Enum()\n\t\tcase ByteString:\n\t\t\tp.Meaning = pb.Property_BYTESTRING.Enum()\n\t\tcase appengine.BlobKey:\n\t\t\tp.Meaning = pb.Property_BLOBKEY.Enum()\n\t\tcase time.Time:\n\t\t\tp.Meaning = pb.Property_GD_WHEN.Enum()\n\t\tcase appengine.GeoPoint:\n\t\t\tp.Meaning = pb.Property_GEORSS_POINT.Enum()\n\t\t}\n\t}\n\treturn p, \"\"\n}\n\n// saveEntity saves an EntityProto into a PropertyLoadSaver or struct pointer.\nfunc saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) {\n\tvar err error\n\tvar props []Property\n\tif e, ok := src.(PropertyLoadSaver); ok {\n\t\tprops, err = e.Save()\n\t} else {\n\t\tprops, err = SaveStruct(src)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn propertiesToProto(defaultAppID, key, props)\n}\n\nfunc saveStructProperty(props *[]Property, name string, noIndex, multiple bool, v reflect.Value) error {\n\tp 
:= Property{\n\t\tName:     name,\n\t\tNoIndex:  noIndex,\n\t\tMultiple: multiple,\n\t}\n\tswitch x := v.Interface().(type) {\n\tcase *Key:\n\t\tp.Value = x\n\tcase time.Time:\n\t\tp.Value = x\n\tcase appengine.BlobKey:\n\t\tp.Value = x\n\tcase appengine.GeoPoint:\n\t\tp.Value = x\n\tcase ByteString:\n\t\tp.Value = x\n\tdefault:\n\t\tswitch v.Kind() {\n\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\tp.Value = v.Int()\n\t\tcase reflect.Bool:\n\t\t\tp.Value = v.Bool()\n\t\tcase reflect.String:\n\t\t\tp.Value = v.String()\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\tp.Value = v.Float()\n\t\tcase reflect.Slice:\n\t\t\tif v.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\t\tp.NoIndex = true\n\t\t\t\tp.Value = v.Bytes()\n\t\t\t}\n\t\tcase reflect.Struct:\n\t\t\tif !v.CanAddr() {\n\t\t\t\treturn fmt.Errorf(\"datastore: unsupported struct field: value is unaddressable\")\n\t\t\t}\n\t\t\tsub, err := newStructPLS(v.Addr().Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"datastore: unsupported struct field: %v\", err)\n\t\t\t}\n\t\t\treturn sub.(structPLS).save(props, name, noIndex, multiple)\n\t\t}\n\t}\n\tif p.Value == nil {\n\t\treturn fmt.Errorf(\"datastore: unsupported struct field type: %v\", v.Type())\n\t}\n\t*props = append(*props, p)\n\treturn nil\n}\n\nfunc (s structPLS) Save() ([]Property, error) {\n\tvar props []Property\n\tif err := s.save(&props, \"\", false, false); err != nil {\n\t\treturn nil, err\n\t}\n\treturn props, nil\n}\n\nfunc (s structPLS) save(props *[]Property, prefix string, noIndex, multiple bool) error {\n\tfor i, t := range s.codec.byIndex {\n\t\tif t.name == \"-\" {\n\t\t\tcontinue\n\t\t}\n\t\tname := t.name\n\t\tif prefix != \"\" {\n\t\t\tname = prefix + name\n\t\t}\n\t\tv := s.v.Field(i)\n\t\tif !v.IsValid() || !v.CanSet() {\n\t\t\tcontinue\n\t\t}\n\t\tnoIndex1 := noIndex || t.noIndex\n\t\t// For slice fields that aren't []byte, save each element.\n\t\tif v.Kind() == reflect.Slice && 
v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\tfor j := 0; j < v.Len(); j++ {\n\t\t\t\tif err := saveStructProperty(props, name, noIndex1, true, v.Index(j)); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// Otherwise, save the field itself.\n\t\tif err := saveStructProperty(props, name, noIndex1, multiple, v); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) {\n\te := &pb.EntityProto{\n\t\tKey: keyToProto(defaultAppID, key),\n\t}\n\tif key.parent == nil {\n\t\te.EntityGroup = &pb.Path{}\n\t} else {\n\t\te.EntityGroup = keyToProto(defaultAppID, key.root()).Path\n\t}\n\tprevMultiple := make(map[string]bool)\n\n\tfor _, p := range props {\n\t\tif pm, ok := prevMultiple[p.Name]; ok {\n\t\t\tif !pm || !p.Multiple {\n\t\t\t\treturn nil, fmt.Errorf(\"datastore: multiple Properties with Name %q, but Multiple is false\", p.Name)\n\t\t\t}\n\t\t} else {\n\t\t\tprevMultiple[p.Name] = p.Multiple\n\t\t}\n\n\t\tx := &pb.Property{\n\t\t\tName:     proto.String(p.Name),\n\t\t\tValue:    new(pb.PropertyValue),\n\t\t\tMultiple: proto.Bool(p.Multiple),\n\t\t}\n\t\tswitch v := p.Value.(type) {\n\t\tcase int64:\n\t\t\tx.Value.Int64Value = proto.Int64(v)\n\t\tcase bool:\n\t\t\tx.Value.BooleanValue = proto.Bool(v)\n\t\tcase string:\n\t\t\tx.Value.StringValue = proto.String(v)\n\t\t\tif p.NoIndex {\n\t\t\t\tx.Meaning = pb.Property_TEXT.Enum()\n\t\t\t}\n\t\tcase float64:\n\t\t\tx.Value.DoubleValue = proto.Float64(v)\n\t\tcase *Key:\n\t\t\tif v != nil {\n\t\t\t\tx.Value.Referencevalue = keyToReferenceValue(defaultAppID, v)\n\t\t\t}\n\t\tcase time.Time:\n\t\t\tif v.Before(minTime) || v.After(maxTime) {\n\t\t\t\treturn nil, fmt.Errorf(\"datastore: time value out of range\")\n\t\t\t}\n\t\t\tx.Value.Int64Value = proto.Int64(toUnixMicro(v))\n\t\t\tx.Meaning = pb.Property_GD_WHEN.Enum()\n\t\tcase appengine.BlobKey:\n\t\t\tx.Value.StringValue = 
proto.String(string(v))\n\t\t\tx.Meaning = pb.Property_BLOBKEY.Enum()\n\t\tcase appengine.GeoPoint:\n\t\t\tif !v.Valid() {\n\t\t\t\treturn nil, fmt.Errorf(\"datastore: invalid GeoPoint value\")\n\t\t\t}\n\t\t\t// NOTE: Strangely, latitude maps to X, longitude to Y.\n\t\t\tx.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng}\n\t\t\tx.Meaning = pb.Property_GEORSS_POINT.Enum()\n\t\tcase []byte:\n\t\t\tx.Value.StringValue = proto.String(string(v))\n\t\t\tx.Meaning = pb.Property_BLOB.Enum()\n\t\t\tif !p.NoIndex {\n\t\t\t\treturn nil, fmt.Errorf(\"datastore: cannot index a []byte valued Property with Name %q\", p.Name)\n\t\t\t}\n\t\tcase ByteString:\n\t\t\tx.Value.StringValue = proto.String(string(v))\n\t\t\tx.Meaning = pb.Property_BYTESTRING.Enum()\n\t\tdefault:\n\t\t\tif p.Value != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"datastore: invalid Value type for a Property with Name %q\", p.Name)\n\t\t\t}\n\t\t}\n\n\t\tif p.NoIndex {\n\t\t\te.RawProperty = append(e.RawProperty, x)\n\t\t} else {\n\t\t\te.Property = append(e.Property, x)\n\t\t\tif len(e.Property) > maxIndexedProperties {\n\t\t\t\treturn nil, errors.New(\"datastore: too many indexed properties\")\n\t\t\t}\n\t\t}\n\t}\n\treturn e, nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/datastore/transaction.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage datastore\n\nimport (\n\t\"errors\"\n\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/datastore\"\n)\n\nfunc init() {\n\tinternal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) {\n\t\tx.Transaction = t\n\t})\n\tinternal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) {\n\t\tx.Transaction = t\n\t})\n\tinternal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) {\n\t\tx.Transaction = t\n\t})\n\tinternal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) {\n\t\tx.Transaction = t\n\t})\n}\n\n// ErrConcurrentTransaction is returned when a transaction is rolled back due\n// to a conflict with a concurrent transaction.\nvar ErrConcurrentTransaction = errors.New(\"datastore: concurrent transaction\")\n\n// RunInTransaction runs f in a transaction. It calls f with a transaction\n// context tc that f should use for all App Engine operations.\n//\n// If f returns nil, RunInTransaction attempts to commit the transaction,\n// returning nil if it succeeds. If the commit fails due to a conflicting\n// transaction, RunInTransaction retries f, each time with a new transaction\n// context. It gives up and returns ErrConcurrentTransaction after three\n// failed attempts. The number of attempts can be configured by specifying\n// TransactionOptions.Attempts.\n//\n// If f returns non-nil, then any datastore changes will not be applied and\n// RunInTransaction returns that same error. The function f is not retried.\n//\n// Note that when f returns, the transaction is not yet committed. 
Calling code\n// must be careful not to assume that any of f's changes have been committed\n// until RunInTransaction returns nil.\n//\n// Since f may be called multiple times, f should usually be idempotent.\n// datastore.Get is not idempotent when unmarshaling slice fields.\n//\n// Nested transactions are not supported; c may not be a transaction context.\nfunc RunInTransaction(c context.Context, f func(tc context.Context) error, opts *TransactionOptions) error {\n\txg := false\n\tif opts != nil {\n\t\txg = opts.XG\n\t}\n\tattempts := 3\n\tif opts != nil && opts.Attempts > 0 {\n\t\tattempts = opts.Attempts\n\t}\n\tfor i := 0; i < attempts; i++ {\n\t\tif err := internal.RunTransactionOnce(c, f, xg); err != internal.ErrConcurrentTransaction {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn ErrConcurrentTransaction\n}\n\n// TransactionOptions are the options for running a transaction.\ntype TransactionOptions struct {\n\t// XG is whether the transaction can cross multiple entity groups. In\n\t// comparison, a single group transaction is one where all datastore keys\n\t// used have the same root key. Note that cross group transactions do not\n\t// have the same behavior as single group transactions. In particular, it\n\t// is much more likely to see partially applied transactions in different\n\t// entity groups, in global queries.\n\t// It is valid to set XG to true even if the transaction is within a\n\t// single entity group.\n\tXG bool\n\t// Attempts controls the number of retries to perform when commits fail\n\t// due to a conflicting transaction. If omitted, it defaults to 3.\n\tAttempts int\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/delay/delay.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n/*\nPackage delay provides a way to execute code outside the scope of a\nuser request by using the taskqueue API.\n\nTo declare a function that may be executed later, call Func\nin a top-level assignment context, passing it an arbitrary string key\nand a function whose first argument is of type context.Context.\n\tvar laterFunc = delay.Func(\"key\", myFunc)\nIt is also possible to use a function literal.\n\tvar laterFunc = delay.Func(\"key\", func(c context.Context, x string) {\n\t\t// ...\n\t})\n\nTo call a function, invoke its Call method.\n\tlaterFunc.Call(c, \"something\")\nA function may be called any number of times. If the function has any\nreturn arguments, and the last one is of type error, the function may\nreturn a non-nil error to signal that the function should be retried.\n\nThe arguments to functions may be of any type that is encodable by the gob\npackage. If an argument is of interface type, it is the client's responsibility\nto register with the gob package whatever concrete type may be passed for that\nargument; see http://golang.org/pkg/gob/#Register for details.\n\nAny errors during initialization or execution of a function will be\nlogged to the application logs. Error logs that occur during initialization will\nbe associated with the request that invoked the Call method.\n\nThe state of a function invocation that has not yet successfully\nexecuted is preserved by combining the file name in which it is declared\nwith the string key that was passed to the Func function. 
Updating an app\nwith pending function invocations is safe as long as the relevant\nfunctions have the (filename, key) combination preserved.\n\nThe delay package uses the Task Queue API to create tasks that call the\nreserved application path \"/_ah/queue/go/delay\".\nThis path must not be marked as \"login: required\" in app.yaml;\nit must be marked as \"login: admin\" or have no access restriction.\n*/\npackage delay\n\nimport (\n\t\"bytes\"\n\t\"encoding/gob\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"reflect\"\n\t\"runtime\"\n\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/log\"\n\t\"google.golang.org/appengine/taskqueue\"\n)\n\n// Function represents a function that may have a delayed invocation.\ntype Function struct {\n\tfv  reflect.Value // Kind() == reflect.Func\n\tkey string\n\terr error // any error during initialization\n}\n\nconst (\n\t// The HTTP path for invocations.\n\tpath = \"/_ah/queue/go/delay\"\n\t// Use the default queue.\n\tqueue = \"\"\n)\n\nvar (\n\t// registry of all delayed functions\n\tfuncs = make(map[string]*Function)\n\n\t// precomputed types\n\tcontextType = reflect.TypeOf((*context.Context)(nil)).Elem()\n\terrorType   = reflect.TypeOf((*error)(nil)).Elem()\n\n\t// errors\n\terrFirstArg = errors.New(\"first argument must be context.Context\")\n)\n\n// Func declares a new Function. The second argument must be a function with a\n// first argument of type context.Context.\n// This function must be called at program initialization time. That means it\n// must be called in a global variable declaration or from an init function.\n// This restriction is necessary because the instance that delays a function\n// call may not be the one that executes it. 
Only the code executed at program\n// initialization time is guaranteed to have been run by an instance before it\n// receives a request.\nfunc Func(key string, i interface{}) *Function {\n\tf := &Function{fv: reflect.ValueOf(i)}\n\n\t// Derive unique, somewhat stable key for this func.\n\t_, file, _, _ := runtime.Caller(1)\n\tf.key = file + \":\" + key\n\n\tt := f.fv.Type()\n\tif t.Kind() != reflect.Func {\n\t\tf.err = errors.New(\"not a function\")\n\t\treturn f\n\t}\n\tif t.NumIn() == 0 || t.In(0) != contextType {\n\t\tf.err = errFirstArg\n\t\treturn f\n\t}\n\n\t// Register the function's arguments with the gob package.\n\t// This is required because they are marshaled inside a []interface{}.\n\t// gob.Register only expects to be called during initialization;\n\t// that's fine because this function expects the same.\n\tfor i := 0; i < t.NumIn(); i++ {\n\t\t// Only concrete types may be registered. If the argument has\n\t\t// interface type, the client is resposible for registering the\n\t\t// concrete types it will hold.\n\t\tif t.In(i).Kind() == reflect.Interface {\n\t\t\tcontinue\n\t\t}\n\t\tgob.Register(reflect.Zero(t.In(i)).Interface())\n\t}\n\n\tfuncs[f.key] = f\n\treturn f\n}\n\ntype invocation struct {\n\tKey  string\n\tArgs []interface{}\n}\n\n// Call invokes a delayed function.\n//   err := f.Call(c, ...)\n// is equivalent to\n//   t, _ := f.Task(...)\n//   err := taskqueue.Add(c, t, \"\")\nfunc (f *Function) Call(c context.Context, args ...interface{}) error {\n\tt, err := f.Task(args...)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = taskqueueAdder(c, t, queue)\n\treturn err\n}\n\n// Task creates a Task that will invoke the function.\n// Its parameters may be tweaked before adding it to a queue.\n// Users should not modify the Path or Payload fields of the returned Task.\nfunc (f *Function) Task(args ...interface{}) (*taskqueue.Task, error) {\n\tif f.err != nil {\n\t\treturn nil, fmt.Errorf(\"delay: func is invalid: %v\", f.err)\n\t}\n\n\tnArgs 
:= len(args) + 1 // +1 for the context.Context\n\tft := f.fv.Type()\n\tminArgs := ft.NumIn()\n\tif ft.IsVariadic() {\n\t\tminArgs--\n\t}\n\tif nArgs < minArgs {\n\t\treturn nil, fmt.Errorf(\"delay: too few arguments to func: %d < %d\", nArgs, minArgs)\n\t}\n\tif !ft.IsVariadic() && nArgs > minArgs {\n\t\treturn nil, fmt.Errorf(\"delay: too many arguments to func: %d > %d\", nArgs, minArgs)\n\t}\n\n\t// Check arg types.\n\tfor i := 1; i < nArgs; i++ {\n\t\tat := reflect.TypeOf(args[i-1])\n\t\tvar dt reflect.Type\n\t\tif i < minArgs {\n\t\t\t// not a variadic arg\n\t\t\tdt = ft.In(i)\n\t\t} else {\n\t\t\t// a variadic arg\n\t\t\tdt = ft.In(minArgs).Elem()\n\t\t}\n\t\t// nil arguments won't have a type, so they need special handling.\n\t\tif at == nil {\n\t\t\t// nil interface\n\t\t\tswitch dt.Kind() {\n\t\t\tcase reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:\n\t\t\t\tcontinue // may be nil\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"delay: argument %d has wrong type: %v is not nilable\", i, dt)\n\t\t}\n\t\tswitch at.Kind() {\n\t\tcase reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:\n\t\t\tav := reflect.ValueOf(args[i-1])\n\t\t\tif av.IsNil() {\n\t\t\t\t// nil value in interface; not supported by gob, so we replace it\n\t\t\t\t// with a nil interface value\n\t\t\t\targs[i-1] = nil\n\t\t\t}\n\t\t}\n\t\tif !at.AssignableTo(dt) {\n\t\t\treturn nil, fmt.Errorf(\"delay: argument %d has wrong type: %v is not assignable to %v\", i, at, dt)\n\t\t}\n\t}\n\n\tinv := invocation{\n\t\tKey:  f.key,\n\t\tArgs: args,\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tif err := gob.NewEncoder(buf).Encode(inv); err != nil {\n\t\treturn nil, fmt.Errorf(\"delay: gob encoding failed: %v\", err)\n\t}\n\n\treturn &taskqueue.Task{\n\t\tPath:    path,\n\t\tPayload: buf.Bytes(),\n\t}, nil\n}\n\nvar taskqueueAdder = taskqueue.Add // for testing\n\nfunc init() {\n\thttp.HandleFunc(path, func(w http.ResponseWriter, req 
*http.Request) {\n\t\trunFunc(appengine.NewContext(req), w, req)\n\t})\n}\n\nfunc runFunc(c context.Context, w http.ResponseWriter, req *http.Request) {\n\tdefer req.Body.Close()\n\n\tvar inv invocation\n\tif err := gob.NewDecoder(req.Body).Decode(&inv); err != nil {\n\t\tlog.Errorf(c, \"delay: failed decoding task payload: %v\", err)\n\t\tlog.Warningf(c, \"delay: dropping task\")\n\t\treturn\n\t}\n\n\tf := funcs[inv.Key]\n\tif f == nil {\n\t\tlog.Errorf(c, \"delay: no func with key %q found\", inv.Key)\n\t\tlog.Warningf(c, \"delay: dropping task\")\n\t\treturn\n\t}\n\n\tft := f.fv.Type()\n\tin := []reflect.Value{reflect.ValueOf(c)}\n\tfor _, arg := range inv.Args {\n\t\tvar v reflect.Value\n\t\tif arg != nil {\n\t\t\tv = reflect.ValueOf(arg)\n\t\t} else {\n\t\t\t// Task was passed a nil argument, so we must construct\n\t\t\t// the zero value for the argument here.\n\t\t\tn := len(in) // we're constructing the nth argument\n\t\t\tvar at reflect.Type\n\t\t\tif !ft.IsVariadic() || n < ft.NumIn()-1 {\n\t\t\t\tat = ft.In(n)\n\t\t\t} else {\n\t\t\t\tat = ft.In(ft.NumIn() - 1).Elem()\n\t\t\t}\n\t\t\tv = reflect.Zero(at)\n\t\t}\n\t\tin = append(in, v)\n\t}\n\tout := f.fv.Call(in)\n\n\tif n := ft.NumOut(); n > 0 && ft.Out(n-1) == errorType {\n\t\tif errv := out[n-1]; !errv.IsNil() {\n\t\t\tlog.Errorf(c, \"delay: func failed (will retry): %v\", errv.Interface())\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/demos/guestbook/app.yaml",
    "content": "# Demo application for Managed VMs.\nruntime: go\nvm: true\napi_version: go1\n\nmanual_scaling:\n  instances: 1\n\nhandlers:\n# Favicon.  Without this, the browser hits this once per page view.\n- url: /favicon.ico\n  static_files: favicon.ico\n  upload: favicon.ico\n\n# Main app.  All the real work is here.\n- url: /.*\n  script: _go_app\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/demos/guestbook/guestbook.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// This example only works on Managed VMs.\n// +build !appengine\n\npackage main\n\nimport (\n\t\"html/template\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/datastore\"\n\t\"google.golang.org/appengine/log\"\n\t\"google.golang.org/appengine/user\"\n)\n\nvar initTime time.Time\n\ntype Greeting struct {\n\tAuthor  string\n\tContent string\n\tDate    time.Time\n}\n\nfunc main() {\n\thttp.HandleFunc(\"/\", handleMainPage)\n\thttp.HandleFunc(\"/sign\", handleSign)\n\tappengine.Main()\n}\n\n// guestbookKey returns the key used for all guestbook entries.\nfunc guestbookKey(ctx context.Context) *datastore.Key {\n\t// The string \"default_guestbook\" here could be varied to have multiple guestbooks.\n\treturn datastore.NewKey(ctx, \"Guestbook\", \"default_guestbook\", 0, nil)\n}\n\nvar tpl = template.Must(template.ParseGlob(\"templates/*.html\"))\n\nfunc handleMainPage(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\thttp.Error(w, \"GET requests only\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tif r.URL.Path != \"/\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tctx := appengine.NewContext(r)\n\ttic := time.Now()\n\tq := datastore.NewQuery(\"Greeting\").Ancestor(guestbookKey(ctx)).Order(\"-Date\").Limit(10)\n\tvar gg []*Greeting\n\tif _, err := q.GetAll(ctx, &gg); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\tlog.Errorf(ctx, \"GetAll: %v\", err)\n\t\treturn\n\t}\n\tlog.Infof(ctx, \"Datastore lookup took %s\", time.Since(tic).String())\n\tlog.Infof(ctx, \"Rendering %d greetings\", len(gg))\n\n\tvar email, logout, login string\n\tif u := user.Current(ctx); u != nil {\n\t\tlogout, _ = user.LogoutURL(ctx, \"/\")\n\t\temail = u.Email\n\t} else 
{\n\t\tlogin, _ = user.LoginURL(ctx, \"/\")\n\t}\n\tdata := struct {\n\t\tGreetings            []*Greeting\n\t\tLogin, Logout, Email string\n\t}{\n\t\tGreetings: gg,\n\t\tLogin:     login,\n\t\tLogout:    logout,\n\t\tEmail:     email,\n\t}\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\tif err := tpl.ExecuteTemplate(w, \"guestbook.html\", data); err != nil {\n\t\tlog.Errorf(ctx, \"%v\", err)\n\t}\n}\n\nfunc handleSign(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, \"POST requests only\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tctx := appengine.NewContext(r)\n\tg := &Greeting{\n\t\tContent: r.FormValue(\"content\"),\n\t\tDate:    time.Now(),\n\t}\n\tif u := user.Current(ctx); u != nil {\n\t\tg.Author = u.String()\n\t}\n\tkey := datastore.NewIncompleteKey(ctx, \"Greeting\", guestbookKey(ctx))\n\tif _, err := datastore.Put(ctx, key, g); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t// Redirect with 303 which causes the subsequent request to use GET.\n\thttp.Redirect(w, r, \"/\", http.StatusSeeOther)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/demos/guestbook/index.yaml",
    "content": "indexes:\n\n- kind: Greeting\n  ancestor: yes\n  properties:\n  - name: Date\n    direction: desc\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html",
    "content": "<!DOCTYPE html>\n<html>\n  <head>\n    <title>Guestbook Demo</title>\n  </head>\n  <body>\n    <p>\n      {{with .Email}}You are currently logged in as {{.}}.{{end}}\n      {{with .Login}}<a href=\"{{.}}\">Sign in</a>{{end}}\n      {{with .Logout}}<a href=\"{{.}}\">Sign out</a>{{end}}\n    </p>\n\n    {{range .Greetings }}\n    <p>\n      {{with .Author}}<b>{{.}}</b>{{else}}An anonymous person{{end}}\n      on <em>{{.Date.Format \"3:04pm, Mon 2 Jan\"}}</em>\n      wrote <blockquote>{{.Content}}</blockquote>\n    </p>\n    {{end}}\n\n    <form action=\"/sign\" method=\"post\">\n      <div><textarea name=\"content\" rows=\"3\" cols=\"60\"></textarea></div>\n      <div><input type=\"submit\" value=\"Sign Guestbook\"></div>\n    </form>\n  </body>\n</html>\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/demos/helloworld/app.yaml",
    "content": "runtime: go\napi_version: go1\nvm: true\n\nmanual_scaling:\n  instances: 1\n\nhandlers:\n- url: /favicon.ico\n  static_files: favicon.ico\n  upload: favicon.ico\n- url: /.*\n  script: _go_app\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/demos/helloworld/helloworld.go",
    "content": "// Copyright 2014 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// This example only works on Managed VMs.\n// +build !appengine\n\npackage main\n\nimport (\n\t\"html/template\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/log\"\n)\n\nvar initTime = time.Now()\n\nfunc main() {\n\thttp.HandleFunc(\"/\", handle)\n\tappengine.Main()\n}\n\nfunc handle(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path != \"/\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tctx := appengine.NewContext(r)\n\tlog.Infof(ctx, \"Serving the front page.\")\n\n\ttmpl.Execute(w, time.Since(initTime))\n}\n\nvar tmpl = template.Must(template.New(\"front\").Parse(`\n<html><body>\n\n<p>\nHello, World! 세상아 안녕!\n</p>\n\n<p>\nThis instance has been running for <em>{{.}}</em>.\n</p>\n\n</body></html>\n`))\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/errors.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// This file provides error functions for common API failure modes.\n\npackage appengine\n\nimport (\n\t\"fmt\"\n\n\t\"google.golang.org/appengine/internal\"\n)\n\n// IsOverQuota reports whether err represents an API call failure\n// due to insufficient available quota.\nfunc IsOverQuota(err error) bool {\n\tcallErr, ok := err.(*internal.CallError)\n\treturn ok && callErr.Code == 4\n}\n\n// MultiError is returned by batch operations when there are errors with\n// particular elements. Errors will be in a one-to-one correspondence with\n// the input elements; successful elements will have a nil entry.\ntype MultiError []error\n\nfunc (m MultiError) Error() string {\n\ts, n := \"\", 0\n\tfor _, e := range m {\n\t\tif e != nil {\n\t\t\tif n == 0 {\n\t\t\t\ts = e.Error()\n\t\t\t}\n\t\t\tn++\n\t\t}\n\t}\n\tswitch n {\n\tcase 0:\n\t\treturn \"(0 errors)\"\n\tcase 1:\n\t\treturn s\n\tcase 2:\n\t\treturn s + \" (and 1 other error)\"\n\t}\n\treturn fmt.Sprintf(\"%s (and %d other errors)\", s, n-1)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/file/file.go",
    "content": "// Copyright 2014 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// Package file provides helper functions for using Google Cloud Storage.\npackage file\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n\taipb \"google.golang.org/appengine/internal/app_identity\"\n)\n\n// DefaultBucketName returns the name of this application's\n// default Google Cloud Storage bucket.\nfunc DefaultBucketName(c context.Context) (string, error) {\n\treq := &aipb.GetDefaultGcsBucketNameRequest{}\n\tres := &aipb.GetDefaultGcsBucketNameResponse{}\n\n\terr := internal.Call(c, \"app_identity_service\", \"GetDefaultGcsBucketName\", req, res)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"file: no default bucket name returned in RPC response: %v\", res)\n\t}\n\treturn res.GetDefaultGcsBucketName(), nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/identity.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage appengine\n\nimport (\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/app_identity\"\n\tmodpb \"google.golang.org/appengine/internal/modules\"\n)\n\n// AppID returns the application ID for the current application.\n// The string will be a plain application ID (e.g. \"appid\"), with a\n// domain prefix for custom domain deployments (e.g. \"example.com:appid\").\nfunc AppID(c context.Context) string { return internal.AppID(c) }\n\n// DefaultVersionHostname returns the standard hostname of the default version\n// of the current application (e.g. \"my-app.appspot.com\"). This is suitable for\n// use in constructing URLs.\nfunc DefaultVersionHostname(c context.Context) string {\n\treturn internal.DefaultVersionHostname(c)\n}\n\n// ModuleName returns the module name of the current instance.\nfunc ModuleName(c context.Context) string {\n\treturn internal.ModuleName(c)\n}\n\n// ModuleHostname returns a hostname of a module instance.\n// If module is the empty string, it refers to the module of the current instance.\n// If version is empty, it refers to the version of the current instance if valid,\n// or the default version of the module of the current instance.\n// If instance is empty, ModuleHostname returns the load-balancing hostname.\nfunc ModuleHostname(c context.Context, module, version, instance string) (string, error) {\n\treq := &modpb.GetHostnameRequest{}\n\tif module != \"\" {\n\t\treq.Module = &module\n\t}\n\tif version != \"\" {\n\t\treq.Version = &version\n\t}\n\tif instance != \"\" {\n\t\treq.Instance = &instance\n\t}\n\tres := &modpb.GetHostnameResponse{}\n\tif err := internal.Call(c, \"modules\", \"GetHostname\", req, res); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn 
*res.Hostname, nil\n}\n\n// VersionID returns the version ID for the current application.\n// It will be of the form \"X.Y\", where X is specified in app.yaml,\n// and Y is a number generated when each version of the app is uploaded.\n// It does not include a module name.\nfunc VersionID(c context.Context) string { return internal.VersionID(c) }\n\n// InstanceID returns a mostly-unique identifier for this instance.\nfunc InstanceID() string { return internal.InstanceID() }\n\n// Datacenter returns an identifier for the datacenter that the instance is running in.\nfunc Datacenter(c context.Context) string { return internal.Datacenter(c) }\n\n// ServerSoftware returns the App Engine release version.\n// In production, it looks like \"Google App Engine/X.Y.Z\".\n// In the development appserver, it looks like \"Development/X.Y\".\nfunc ServerSoftware() string { return internal.ServerSoftware() }\n\n// RequestID returns a string that uniquely identifies the request.\nfunc RequestID(c context.Context) string { return internal.RequestID(c) }\n\n// AccessToken generates an OAuth2 access token for the specified scopes on\n// behalf of service account of this application. 
This token will expire after\n// the returned time.\nfunc AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) {\n\treq := &pb.GetAccessTokenRequest{Scope: scopes}\n\tres := &pb.GetAccessTokenResponse{}\n\n\terr = internal.Call(c, \"app_identity_service\", \"GetAccessToken\", req, res)\n\tif err != nil {\n\t\treturn \"\", time.Time{}, err\n\t}\n\treturn res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil\n}\n\n// Certificate represents a public certificate for the app.\ntype Certificate struct {\n\tKeyName string\n\tData    []byte // PEM-encoded X.509 certificate\n}\n\n// PublicCertificates retrieves the public certificates for the app.\n// They can be used to verify a signature returned by SignBytes.\nfunc PublicCertificates(c context.Context) ([]Certificate, error) {\n\treq := &pb.GetPublicCertificateForAppRequest{}\n\tres := &pb.GetPublicCertificateForAppResponse{}\n\tif err := internal.Call(c, \"app_identity_service\", \"GetPublicCertificatesForApp\", req, res); err != nil {\n\t\treturn nil, err\n\t}\n\tvar cs []Certificate\n\tfor _, pc := range res.PublicCertificateList {\n\t\tcs = append(cs, Certificate{\n\t\t\tKeyName: pc.GetKeyName(),\n\t\t\tData:    []byte(pc.GetX509CertificatePem()),\n\t\t})\n\t}\n\treturn cs, nil\n}\n\n// ServiceAccount returns a string representing the service account name, in\n// the form of an email address (typically app_id@appspot.gserviceaccount.com).\nfunc ServiceAccount(c context.Context) (string, error) {\n\treq := &pb.GetServiceAccountNameRequest{}\n\tres := &pb.GetServiceAccountNameResponse{}\n\n\terr := internal.Call(c, \"app_identity_service\", \"GetServiceAccountName\", req, res)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn res.GetServiceAccountName(), err\n}\n\n// SignBytes signs bytes using a private key unique to your application.\nfunc SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) {\n\treq := 
&pb.SignForAppRequest{BytesToSign: bytes}\n\tres := &pb.SignForAppResponse{}\n\n\tif err := internal.Call(c, \"app_identity_service\", \"SignForApp\", req, res); err != nil {\n\t\treturn \"\", nil, err\n\t}\n\treturn res.GetKeyName(), res.GetSignatureBytes(), nil\n}\n\nfunc init() {\n\tinternal.RegisterErrorCodeMap(\"app_identity_service\", pb.AppIdentityServiceError_ErrorCode_name)\n\tinternal.RegisterErrorCodeMap(\"modules\", modpb.ModulesServiceError_ErrorCode_name)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/image/image.go",
    "content": "// Copyright 2012 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// Package image provides image services.\npackage image\n\nimport (\n\t\"fmt\"\n\t\"net/url\"\n\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/image\"\n)\n\ntype ServingURLOptions struct {\n\tSecure bool // whether the URL should use HTTPS\n\n\t// Size must be between zero and 1600.\n\t// If Size is non-zero, a resized version of the image is served,\n\t// and Size is the served image's longest dimension. The aspect ratio is preserved.\n\t// If Crop is true the image is cropped from the center instead of being resized.\n\tSize int\n\tCrop bool\n}\n\n// ServingURL returns a URL that will serve an image from Blobstore.\nfunc ServingURL(c context.Context, key appengine.BlobKey, opts *ServingURLOptions) (*url.URL, error) {\n\treq := &pb.ImagesGetUrlBaseRequest{\n\t\tBlobKey: (*string)(&key),\n\t}\n\tif opts != nil && opts.Secure {\n\t\treq.CreateSecureUrl = &opts.Secure\n\t}\n\tres := &pb.ImagesGetUrlBaseResponse{}\n\tif err := internal.Call(c, \"images\", \"GetUrlBase\", req, res); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// The URL may have suffixes added to dynamically resize or crop:\n\t// - adding \"=s32\" will serve the image resized to 32 pixels, preserving the aspect ratio.\n\t// - adding \"=s32-c\" is the same as \"=s32\" except it will be cropped.\n\tu := *res.Url\n\tif opts != nil && opts.Size > 0 {\n\t\tu += fmt.Sprintf(\"=s%d\", opts.Size)\n\t\tif opts.Crop {\n\t\t\tu += \"-c\"\n\t\t}\n\t}\n\treturn url.Parse(u)\n}\n\n// DeleteServingURL deletes the serving URL for an image.\nfunc DeleteServingURL(c context.Context, key appengine.BlobKey) error {\n\treq := &pb.ImagesDeleteUrlBaseRequest{\n\t\tBlobKey: (*string)(&key),\n\t}\n\tres := 
&pb.ImagesDeleteUrlBaseResponse{}\n\treturn internal.Call(c, \"images\", \"DeleteUrlBase\", req, res)\n}\n\nfunc init() {\n\tinternal.RegisterErrorCodeMap(\"images\", pb.ImagesServiceError_ErrorCode_name)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/aetesting/fake.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// Package aetesting provides utilities for testing App Engine packages.\n// This is not for testing user applications.\npackage aetesting\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n)\n\n// FakeSingleContext returns a context whose Call invocations will be serviced\n// by f, which should be a function that has two arguments of the input and output\n// protocol buffer type, and one error return.\nfunc FakeSingleContext(t *testing.T, service, method string, f interface{}) context.Context {\n\tfv := reflect.ValueOf(f)\n\tif fv.Kind() != reflect.Func {\n\t\tt.Fatal(\"not a function\")\n\t}\n\tft := fv.Type()\n\tif ft.NumIn() != 2 || ft.NumOut() != 1 {\n\t\tt.Fatalf(\"f has %d in and %d out, want 2 in and 1 out\", ft.NumIn(), ft.NumOut())\n\t}\n\tfor i := 0; i < 2; i++ {\n\t\tat := ft.In(i)\n\t\tif !at.Implements(protoMessageType) {\n\t\t\tt.Fatalf(\"arg %d does not implement proto.Message\", i)\n\t\t}\n\t}\n\tif ft.Out(0) != errorType {\n\t\tt.Fatalf(\"f's return is %v, want error\", ft.Out(0))\n\t}\n\ts := &single{\n\t\tt:       t,\n\t\tservice: service,\n\t\tmethod:  method,\n\t\tf:       fv,\n\t}\n\treturn internal.WithCallOverride(context.Background(), s.call)\n}\n\nvar (\n\tprotoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()\n\terrorType        = reflect.TypeOf((*error)(nil)).Elem()\n)\n\ntype single struct {\n\tt               *testing.T\n\tservice, method string\n\tf               reflect.Value\n}\n\nfunc (s *single) call(ctx context.Context, service, method string, in, out proto.Message) error {\n\tif service == \"__go__\" {\n\t\tif method == \"GetNamespace\" {\n\t\t\treturn nil // always yield an empty namespace\n\t\t}\n\t\treturn 
fmt.Errorf(\"Unknown API call /%s.%s\", service, method)\n\t}\n\tif service != s.service || method != s.method {\n\t\ts.t.Fatalf(\"Unexpected call to /%s.%s\", service, method)\n\t}\n\tins := []reflect.Value{\n\t\treflect.ValueOf(in),\n\t\treflect.ValueOf(out),\n\t}\n\touts := s.f.Call(ins)\n\tif outs[0].IsNil() {\n\t\treturn nil\n\t}\n\treturn outs[0].Interface().(error)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/api.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// +build !appengine\n\npackage internal\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/golang/protobuf/proto\"\n\tnetcontext \"golang.org/x/net/context\"\n\n\tbasepb \"google.golang.org/appengine/internal/base\"\n\tlogpb \"google.golang.org/appengine/internal/log\"\n\tremotepb \"google.golang.org/appengine/internal/remote_api\"\n)\n\nconst (\n\tapiPath = \"/rpc_http\"\n)\n\nvar (\n\t// Incoming headers.\n\tticketHeader       = http.CanonicalHeaderKey(\"X-AppEngine-API-Ticket\")\n\tdapperHeader       = http.CanonicalHeaderKey(\"X-Google-DapperTraceInfo\")\n\ttraceHeader        = http.CanonicalHeaderKey(\"X-Cloud-Trace-Context\")\n\tcurNamespaceHeader = http.CanonicalHeaderKey(\"X-AppEngine-Current-Namespace\")\n\tuserIPHeader       = http.CanonicalHeaderKey(\"X-AppEngine-User-IP\")\n\tremoteAddrHeader   = http.CanonicalHeaderKey(\"X-AppEngine-Remote-Addr\")\n\n\t// Outgoing headers.\n\tapiEndpointHeader      = http.CanonicalHeaderKey(\"X-Google-RPC-Service-Endpoint\")\n\tapiEndpointHeaderValue = []string{\"app-engine-apis\"}\n\tapiMethodHeader        = http.CanonicalHeaderKey(\"X-Google-RPC-Service-Method\")\n\tapiMethodHeaderValue   = []string{\"/VMRemoteAPI.CallRemoteAPI\"}\n\tapiDeadlineHeader      = http.CanonicalHeaderKey(\"X-Google-RPC-Service-Deadline\")\n\tapiContentType         = http.CanonicalHeaderKey(\"Content-Type\")\n\tapiContentTypeValue    = []string{\"application/octet-stream\"}\n\tlogFlushHeader         = http.CanonicalHeaderKey(\"X-AppEngine-Log-Flush-Count\")\n\n\tapiHTTPClient = &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tDial:  
limitDial,\n\t\t},\n\t}\n)\n\nfunc apiURL() *url.URL {\n\thost, port := \"appengine.googleapis.internal\", \"10001\"\n\tif h := os.Getenv(\"API_HOST\"); h != \"\" {\n\t\thost = h\n\t}\n\tif p := os.Getenv(\"API_PORT\"); p != \"\" {\n\t\tport = p\n\t}\n\treturn &url.URL{\n\t\tScheme: \"http\",\n\t\tHost:   host + \":\" + port,\n\t\tPath:   apiPath,\n\t}\n}\n\nfunc handleHTTP(w http.ResponseWriter, r *http.Request) {\n\tc := &context{\n\t\treq:       r,\n\t\toutHeader: w.Header(),\n\t\tapiURL:    apiURL(),\n\t}\n\tstopFlushing := make(chan int)\n\n\tctxs.Lock()\n\tctxs.m[r] = c\n\tctxs.Unlock()\n\tdefer func() {\n\t\tctxs.Lock()\n\t\tdelete(ctxs.m, r)\n\t\tctxs.Unlock()\n\t}()\n\n\t// Patch up RemoteAddr so it looks reasonable.\n\tif addr := r.Header.Get(userIPHeader); addr != \"\" {\n\t\tr.RemoteAddr = addr\n\t} else if addr = r.Header.Get(remoteAddrHeader); addr != \"\" {\n\t\tr.RemoteAddr = addr\n\t} else {\n\t\t// Should not normally reach here, but pick a sensible default anyway.\n\t\tr.RemoteAddr = \"127.0.0.1\"\n\t}\n\t// The address in the headers will most likely be of these forms:\n\t//\t123.123.123.123\n\t//\t2001:db8::1\n\t// net/http.Request.RemoteAddr is specified to be in \"IP:port\" form.\n\tif _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {\n\t\t// Assume the remote address is only a host; add a default port.\n\t\tr.RemoteAddr = net.JoinHostPort(r.RemoteAddr, \"80\")\n\t}\n\n\t// Start goroutine responsible for flushing app logs.\n\t// This is done after adding c to ctx.m (and stopped before removing it)\n\t// because flushing logs requires making an API call.\n\tgo c.logFlusher(stopFlushing)\n\n\texecuteRequestSafely(c, r)\n\tc.outHeader = nil // make sure header changes aren't respected any more\n\n\tstopFlushing <- 1 // any logging beyond this point will be dropped\n\n\t// Flush any pending logs asynchronously.\n\tc.pendingLogs.Lock()\n\tflushes := c.pendingLogs.flushes\n\tif len(c.pendingLogs.lines) > 0 
{\n\t\tflushes++\n\t}\n\tc.pendingLogs.Unlock()\n\tgo c.flushLog(false)\n\tw.Header().Set(logFlushHeader, strconv.Itoa(flushes))\n\n\t// Avoid nil Write call if c.Write is never called.\n\tif c.outCode != 0 {\n\t\tw.WriteHeader(c.outCode)\n\t}\n\tif c.outBody != nil {\n\t\tw.Write(c.outBody)\n\t}\n}\n\nfunc executeRequestSafely(c *context, r *http.Request) {\n\tdefer func() {\n\t\tif x := recover(); x != nil {\n\t\t\tlogf(c, 4, \"%s\", renderPanic(x)) // 4 == critical\n\t\t\tc.outCode = 500\n\t\t}\n\t}()\n\n\thttp.DefaultServeMux.ServeHTTP(c, r)\n}\n\nfunc renderPanic(x interface{}) string {\n\tbuf := make([]byte, 16<<10) // 16 KB should be plenty\n\tbuf = buf[:runtime.Stack(buf, false)]\n\n\t// Remove the first few stack frames:\n\t//   this func\n\t//   the recover closure in the caller\n\t// That will root the stack trace at the site of the panic.\n\tconst (\n\t\tskipStart  = \"internal.renderPanic\"\n\t\tskipFrames = 2\n\t)\n\tstart := bytes.Index(buf, []byte(skipStart))\n\tp := start\n\tfor i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {\n\t\tp = bytes.IndexByte(buf[p+1:], '\\n') + p + 1\n\t\tif p < 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\tif p >= 0 {\n\t\t// buf[start:p+1] is the block to remove.\n\t\t// Copy buf[p+1:] over buf[start:] and shrink buf.\n\t\tcopy(buf[start:], buf[p+1:])\n\t\tbuf = buf[:len(buf)-(p+1-start)]\n\t}\n\n\t// Add panic heading.\n\thead := fmt.Sprintf(\"panic: %v\\n\\n\", x)\n\tif len(head) > len(buf) {\n\t\t// Extremely unlikely to happen.\n\t\treturn head\n\t}\n\tcopy(buf[len(head):], buf)\n\tcopy(buf, head)\n\n\treturn string(buf)\n}\n\nvar ctxs = struct {\n\tsync.Mutex\n\tm  map[*http.Request]*context\n\tbg *context // background context, lazily initialized\n\t// dec is used by tests to decorate the netcontext.Context returned\n\t// for a given request. This allows tests to add overrides (such as\n\t// WithAppIDOverride) to the context. 
The map is nil outside tests.\n\tdec map[*http.Request]func(netcontext.Context) netcontext.Context\n}{\n\tm: make(map[*http.Request]*context),\n}\n\n// context represents the context of an in-flight HTTP request.\n// It implements the appengine.Context and http.ResponseWriter interfaces.\ntype context struct {\n\treq *http.Request\n\n\toutCode   int\n\toutHeader http.Header\n\toutBody   []byte\n\n\tpendingLogs struct {\n\t\tsync.Mutex\n\t\tlines   []*logpb.UserAppLogLine\n\t\tflushes int\n\t}\n\n\tapiURL *url.URL\n}\n\nvar contextKey = \"holds a *context\"\n\nfunc fromContext(ctx netcontext.Context) *context {\n\tc, _ := ctx.Value(&contextKey).(*context)\n\treturn c\n}\n\nfunc withContext(parent netcontext.Context, c *context) netcontext.Context {\n\tctx := netcontext.WithValue(parent, &contextKey, c)\n\tif ns := c.req.Header.Get(curNamespaceHeader); ns != \"\" {\n\t\tctx = withNamespace(ctx, ns)\n\t}\n\treturn ctx\n}\n\nfunc toContext(c *context) netcontext.Context {\n\treturn withContext(netcontext.Background(), c)\n}\n\nfunc IncomingHeaders(ctx netcontext.Context) http.Header {\n\tif c := fromContext(ctx); c != nil {\n\t\treturn c.req.Header\n\t}\n\treturn nil\n}\n\nfunc WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {\n\tctxs.Lock()\n\tc := ctxs.m[req]\n\td := ctxs.dec[req]\n\tctxs.Unlock()\n\n\tif d != nil {\n\t\tparent = d(parent)\n\t}\n\n\tif c == nil {\n\t\t// Someone passed in an http.Request that is not in-flight.\n\t\t// We panic here rather than panicking at a later point\n\t\t// so that stack traces will be more sensible.\n\t\tlog.Panic(\"appengine: NewContext passed an unknown http.Request\")\n\t}\n\treturn withContext(parent, c)\n}\n\nfunc BackgroundContext() netcontext.Context {\n\tctxs.Lock()\n\tdefer ctxs.Unlock()\n\n\tif ctxs.bg != nil {\n\t\treturn toContext(ctxs.bg)\n\t}\n\n\t// Compute background security ticket.\n\tappID := partitionlessAppID()\n\tescAppID := strings.Replace(strings.Replace(appID, \":\", \"_\", 
-1), \".\", \"_\", -1)\n\tmajVersion := VersionID(nil)\n\tif i := strings.Index(majVersion, \".\"); i > 0 {\n\t\tmajVersion = majVersion[:i]\n\t}\n\tticket := fmt.Sprintf(\"%s/%s.%s.%s\", escAppID, ModuleName(nil), majVersion, InstanceID())\n\n\tctxs.bg = &context{\n\t\treq: &http.Request{\n\t\t\tHeader: http.Header{\n\t\t\t\tticketHeader: []string{ticket},\n\t\t\t},\n\t\t},\n\t\tapiURL: apiURL(),\n\t}\n\n\t// TODO(dsymonds): Wire up the shutdown handler to do a final flush.\n\tgo ctxs.bg.logFlusher(make(chan int))\n\n\treturn toContext(ctxs.bg)\n}\n\n// RegisterTestRequest registers the HTTP request req for testing, such that\n// any API calls are sent to the provided URL. It returns a closure to delete\n// the registration.\n// It should only be used by aetest package.\nfunc RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() {\n\tc := &context{\n\t\treq:    req,\n\t\tapiURL: apiURL,\n\t}\n\tctxs.Lock()\n\tdefer ctxs.Unlock()\n\tif _, ok := ctxs.m[req]; ok {\n\t\tlog.Panic(\"req already associated with context\")\n\t}\n\tif _, ok := ctxs.dec[req]; ok {\n\t\tlog.Panic(\"req already associated with context\")\n\t}\n\tif ctxs.dec == nil {\n\t\tctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context)\n\t}\n\tctxs.m[req] = c\n\tctxs.dec[req] = decorate\n\n\treturn func() {\n\t\tctxs.Lock()\n\t\tdelete(ctxs.m, req)\n\t\tdelete(ctxs.dec, req)\n\t\tctxs.Unlock()\n\t}\n}\n\nvar errTimeout = &CallError{\n\tDetail:  \"Deadline exceeded\",\n\tCode:    int32(remotepb.RpcError_CANCELLED),\n\tTimeout: true,\n}\n\nfunc (c *context) Header() http.Header { return c.outHeader }\n\n// Copied from $GOROOT/src/pkg/net/http/transfer.go. 
Some response status\n// codes do not permit a response body (nor response entity headers such as\n// Content-Length, Content-Type, etc).\nfunc bodyAllowedForStatus(status int) bool {\n\tswitch {\n\tcase status >= 100 && status <= 199:\n\t\treturn false\n\tcase status == 204:\n\t\treturn false\n\tcase status == 304:\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (c *context) Write(b []byte) (int, error) {\n\tif c.outCode == 0 {\n\t\tc.WriteHeader(http.StatusOK)\n\t}\n\tif len(b) > 0 && !bodyAllowedForStatus(c.outCode) {\n\t\treturn 0, http.ErrBodyNotAllowed\n\t}\n\tc.outBody = append(c.outBody, b...)\n\treturn len(b), nil\n}\n\nfunc (c *context) WriteHeader(code int) {\n\tif c.outCode != 0 {\n\t\tlogf(c, 3, \"WriteHeader called multiple times on request.\") // error level\n\t\treturn\n\t}\n\tc.outCode = code\n}\n\nfunc (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {\n\threq := &http.Request{\n\t\tMethod: \"POST\",\n\t\tURL:    c.apiURL,\n\t\tHeader: http.Header{\n\t\t\tapiEndpointHeader: apiEndpointHeaderValue,\n\t\t\tapiMethodHeader:   apiMethodHeaderValue,\n\t\t\tapiContentType:    apiContentTypeValue,\n\t\t\tapiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},\n\t\t},\n\t\tBody:          ioutil.NopCloser(bytes.NewReader(body)),\n\t\tContentLength: int64(len(body)),\n\t\tHost:          c.apiURL.Host,\n\t}\n\tif info := c.req.Header.Get(dapperHeader); info != \"\" {\n\t\threq.Header.Set(dapperHeader, info)\n\t}\n\tif info := c.req.Header.Get(traceHeader); info != \"\" {\n\t\threq.Header.Set(traceHeader, info)\n\t}\n\n\ttr := apiHTTPClient.Transport.(*http.Transport)\n\n\tvar timedOut int32 // atomic; set to 1 if timed out\n\tt := time.AfterFunc(timeout, func() {\n\t\tatomic.StoreInt32(&timedOut, 1)\n\t\ttr.CancelRequest(hreq)\n\t})\n\tdefer t.Stop()\n\tdefer func() {\n\t\t// Check if timeout was exceeded.\n\t\tif atomic.LoadInt32(&timedOut) != 0 {\n\t\t\terr = errTimeout\n\t\t}\n\t}()\n\n\thresp, err := 
apiHTTPClient.Do(hreq)\n\tif err != nil {\n\t\treturn nil, &CallError{\n\t\t\tDetail: fmt.Sprintf(\"service bridge HTTP failed: %v\", err),\n\t\t\tCode:   int32(remotepb.RpcError_UNKNOWN),\n\t\t}\n\t}\n\tdefer hresp.Body.Close()\n\threspBody, err := ioutil.ReadAll(hresp.Body)\n\tif hresp.StatusCode != 200 {\n\t\treturn nil, &CallError{\n\t\t\tDetail: fmt.Sprintf(\"service bridge returned HTTP %d (%q)\", hresp.StatusCode, hrespBody),\n\t\t\tCode:   int32(remotepb.RpcError_UNKNOWN),\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, &CallError{\n\t\t\tDetail: fmt.Sprintf(\"service bridge response bad: %v\", err),\n\t\t\tCode:   int32(remotepb.RpcError_UNKNOWN),\n\t\t}\n\t}\n\treturn hrespBody, nil\n}\n\nfunc Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {\n\tif f, ctx, ok := callOverrideFromContext(ctx); ok {\n\t\treturn f(ctx, service, method, in, out)\n\t}\n\n\t// Handle already-done contexts quickly.\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tdefault:\n\t}\n\n\tc := fromContext(ctx)\n\tif c == nil {\n\t\t// Give a good error message rather than a panic lower down.\n\t\treturn errors.New(\"not an App Engine context\")\n\t}\n\n\t// Apply transaction modifications if we're in a transaction.\n\tif t := transactionFromContext(ctx); t != nil {\n\t\tif t.finished {\n\t\t\treturn errors.New(\"transaction context has expired\")\n\t\t}\n\t\tapplyTransaction(in, &t.transaction)\n\t}\n\n\t// Default RPC timeout is 60s.\n\ttimeout := 60 * time.Second\n\tif deadline, ok := ctx.Deadline(); ok {\n\t\ttimeout = deadline.Sub(time.Now())\n\t}\n\n\tdata, err := proto.Marshal(in)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tticket := c.req.Header.Get(ticketHeader)\n\treq := &remotepb.Request{\n\t\tServiceName: &service,\n\t\tMethod:      &method,\n\t\tRequest:     data,\n\t\tRequestId:   &ticket,\n\t}\n\threqBody, err := proto.Marshal(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\threspBody, err := c.post(hreqBody, timeout)\n\tif err 
!= nil {\n\t\treturn err\n\t}\n\n\tres := &remotepb.Response{}\n\tif err := proto.Unmarshal(hrespBody, res); err != nil {\n\t\treturn err\n\t}\n\tif res.RpcError != nil {\n\t\tce := &CallError{\n\t\t\tDetail: res.RpcError.GetDetail(),\n\t\t\tCode:   *res.RpcError.Code,\n\t\t}\n\t\tswitch remotepb.RpcError_ErrorCode(ce.Code) {\n\t\tcase remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:\n\t\t\tce.Timeout = true\n\t\t}\n\t\treturn ce\n\t}\n\tif res.ApplicationError != nil {\n\t\treturn &APIError{\n\t\t\tService: *req.ServiceName,\n\t\t\tDetail:  res.ApplicationError.GetDetail(),\n\t\t\tCode:    *res.ApplicationError.Code,\n\t\t}\n\t}\n\tif res.Exception != nil || res.JavaException != nil {\n\t\t// This shouldn't happen, but let's be defensive.\n\t\treturn &CallError{\n\t\t\tDetail: \"service bridge returned exception\",\n\t\t\tCode:   int32(remotepb.RpcError_UNKNOWN),\n\t\t}\n\t}\n\treturn proto.Unmarshal(res.Response, out)\n}\n\nfunc (c *context) Request() *http.Request {\n\treturn c.req\n}\n\nfunc (c *context) addLogLine(ll *logpb.UserAppLogLine) {\n\t// Truncate long log lines.\n\t// TODO(dsymonds): Check if this is still necessary.\n\tconst lim = 8 << 10\n\tif len(*ll.Message) > lim {\n\t\tsuffix := fmt.Sprintf(\"...(length %d)\", len(*ll.Message))\n\t\tll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)\n\t}\n\n\tc.pendingLogs.Lock()\n\tc.pendingLogs.lines = append(c.pendingLogs.lines, ll)\n\tc.pendingLogs.Unlock()\n}\n\nvar logLevelName = map[int64]string{\n\t0: \"DEBUG\",\n\t1: \"INFO\",\n\t2: \"WARNING\",\n\t3: \"ERROR\",\n\t4: \"CRITICAL\",\n}\n\nfunc logf(c *context, level int64, format string, args ...interface{}) {\n\ts := fmt.Sprintf(format, args...)\n\ts = strings.TrimRight(s, \"\\n\") // Remove any trailing newline characters.\n\tc.addLogLine(&logpb.UserAppLogLine{\n\t\tTimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),\n\t\tLevel:         &level,\n\t\tMessage:       &s,\n\t})\n\tlog.Print(logLevelName[level] + \": 
\" + s)\n}\n\n// flushLog attempts to flush any pending logs to the appserver.\n// It should not be called concurrently.\nfunc (c *context) flushLog(force bool) (flushed bool) {\n\tc.pendingLogs.Lock()\n\t// Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.\n\tn, rem := 0, 30<<20\n\tfor ; n < len(c.pendingLogs.lines); n++ {\n\t\tll := c.pendingLogs.lines[n]\n\t\t// Each log line will require about 3 bytes of overhead.\n\t\tnb := proto.Size(ll) + 3\n\t\tif nb > rem {\n\t\t\tbreak\n\t\t}\n\t\trem -= nb\n\t}\n\tlines := c.pendingLogs.lines[:n]\n\tc.pendingLogs.lines = c.pendingLogs.lines[n:]\n\tc.pendingLogs.Unlock()\n\n\tif len(lines) == 0 && !force {\n\t\t// Nothing to flush.\n\t\treturn false\n\t}\n\n\trescueLogs := false\n\tdefer func() {\n\t\tif rescueLogs {\n\t\t\tc.pendingLogs.Lock()\n\t\t\tc.pendingLogs.lines = append(lines, c.pendingLogs.lines...)\n\t\t\tc.pendingLogs.Unlock()\n\t\t}\n\t}()\n\n\tbuf, err := proto.Marshal(&logpb.UserAppLogGroup{\n\t\tLogLine: lines,\n\t})\n\tif err != nil {\n\t\tlog.Printf(\"internal.flushLog: marshaling UserAppLogGroup: %v\", err)\n\t\trescueLogs = true\n\t\treturn false\n\t}\n\n\treq := &logpb.FlushRequest{\n\t\tLogs: buf,\n\t}\n\tres := &basepb.VoidProto{}\n\tc.pendingLogs.Lock()\n\tc.pendingLogs.flushes++\n\tc.pendingLogs.Unlock()\n\tif err := Call(toContext(c), \"logservice\", \"Flush\", req, res); err != nil {\n\t\tlog.Printf(\"internal.flushLog: Flush RPC: %v\", err)\n\t\trescueLogs = true\n\t\treturn false\n\t}\n\treturn true\n}\n\nconst (\n\t// Log flushing parameters.\n\tflushInterval      = 1 * time.Second\n\tforceFlushInterval = 60 * time.Second\n)\n\nfunc (c *context) logFlusher(stop <-chan int) {\n\tlastFlush := time.Now()\n\ttick := time.NewTicker(flushInterval)\n\tfor {\n\t\tselect {\n\t\tcase <-stop:\n\t\t\t// Request finished.\n\t\t\ttick.Stop()\n\t\t\treturn\n\t\tcase <-tick.C:\n\t\t\tforce := time.Now().Sub(lastFlush) > forceFlushInterval\n\t\t\tif c.flushLog(force) 
{\n\t\t\t\tlastFlush = time.Now()\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc ContextForTesting(req *http.Request) netcontext.Context {\n\treturn toContext(&context{req: req})\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/api_classic.go",
    "content": "// Copyright 2015 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// +build appengine\n\npackage internal\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"appengine\"\n\t\"appengine_internal\"\n\tbasepb \"appengine_internal/base\"\n\n\t\"github.com/golang/protobuf/proto\"\n\tnetcontext \"golang.org/x/net/context\"\n)\n\nvar contextKey = \"holds an appengine.Context\"\n\nfunc fromContext(ctx netcontext.Context) appengine.Context {\n\tc, _ := ctx.Value(&contextKey).(appengine.Context)\n\treturn c\n}\n\n// This is only for classic App Engine adapters.\nfunc ClassicContextFromContext(ctx netcontext.Context) appengine.Context {\n\treturn fromContext(ctx)\n}\n\nfunc withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {\n\tctx := netcontext.WithValue(parent, &contextKey, c)\n\n\ts := &basepb.StringProto{}\n\tc.Call(\"__go__\", \"GetNamespace\", &basepb.VoidProto{}, s, nil)\n\tif ns := s.GetValue(); ns != \"\" {\n\t\tctx = NamespacedContext(ctx, ns)\n\t}\n\n\treturn ctx\n}\n\nfunc IncomingHeaders(ctx netcontext.Context) http.Header {\n\tif c := fromContext(ctx); c != nil {\n\t\tif req, ok := c.Request().(*http.Request); ok {\n\t\t\treturn req.Header\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {\n\tc := appengine.NewContext(req)\n\treturn withContext(parent, c)\n}\n\nfunc Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {\n\tif f, ctx, ok := callOverrideFromContext(ctx); ok {\n\t\treturn f(ctx, service, method, in, out)\n\t}\n\n\t// Handle already-done contexts quickly.\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tdefault:\n\t}\n\n\tc := fromContext(ctx)\n\tif c == nil {\n\t\t// Give a good error message rather than a panic lower down.\n\t\treturn errors.New(\"not an App Engine context\")\n\t}\n\n\t// Apply 
transaction modifications if we're in a transaction.\n\tif t := transactionFromContext(ctx); t != nil {\n\t\tif t.finished {\n\t\t\treturn errors.New(\"transaction context has expired\")\n\t\t}\n\t\tapplyTransaction(in, &t.transaction)\n\t}\n\n\tvar opts *appengine_internal.CallOptions\n\tif d, ok := ctx.Deadline(); ok {\n\t\topts = &appengine_internal.CallOptions{\n\t\t\tTimeout: d.Sub(time.Now()),\n\t\t}\n\t}\n\n\terr := c.Call(service, method, in, out, opts)\n\tswitch v := err.(type) {\n\tcase *appengine_internal.APIError:\n\t\treturn &APIError{\n\t\t\tService: v.Service,\n\t\t\tDetail:  v.Detail,\n\t\t\tCode:    v.Code,\n\t\t}\n\tcase *appengine_internal.CallError:\n\t\treturn &CallError{\n\t\t\tDetail:  v.Detail,\n\t\t\tCode:    v.Code,\n\t\t\tTimeout: v.Timeout,\n\t\t}\n\t}\n\treturn err\n}\n\nfunc handleHTTP(w http.ResponseWriter, r *http.Request) {\n\tpanic(\"handleHTTP called; this should be impossible\")\n}\n\nfunc logf(c appengine.Context, level int64, format string, args ...interface{}) {\n\tvar fn func(format string, args ...interface{})\n\tswitch level {\n\tcase 0:\n\t\tfn = c.Debugf\n\tcase 1:\n\t\tfn = c.Infof\n\tcase 2:\n\t\tfn = c.Warningf\n\tcase 3:\n\t\tfn = c.Errorf\n\tcase 4:\n\t\tfn = c.Criticalf\n\tdefault:\n\t\t// This shouldn't happen.\n\t\tfn = c.Criticalf\n\t}\n\tfn(format, args...)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/api_common.go",
    "content": "// Copyright 2015 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage internal\n\nimport (\n\t\"github.com/golang/protobuf/proto\"\n\tnetcontext \"golang.org/x/net/context\"\n)\n\ntype CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error\n\nvar callOverrideKey = \"holds []CallOverrideFunc\"\n\nfunc WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {\n\t// We avoid appending to any existing call override\n\t// so we don't risk overwriting a popped stack below.\n\tvar cofs []CallOverrideFunc\n\tif uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {\n\t\tcofs = append(cofs, uf...)\n\t}\n\tcofs = append(cofs, f)\n\treturn netcontext.WithValue(ctx, &callOverrideKey, cofs)\n}\n\nfunc callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {\n\tcofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)\n\tif len(cofs) == 0 {\n\t\treturn nil, nil, false\n\t}\n\t// We found a list of overrides; grab the last, and reconstitute a\n\t// context that will hide it.\n\tf := cofs[len(cofs)-1]\n\tctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])\n\treturn f, ctx, true\n}\n\ntype logOverrideFunc func(level int64, format string, args ...interface{})\n\nvar logOverrideKey = \"holds a logOverrideFunc\"\n\nfunc WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {\n\treturn netcontext.WithValue(ctx, &logOverrideKey, f)\n}\n\nvar appIDOverrideKey = \"holds a string, being the full app ID\"\n\nfunc WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {\n\treturn netcontext.WithValue(ctx, &appIDOverrideKey, appID)\n}\n\nvar namespaceKey = \"holds the namespace string\"\n\nfunc withNamespace(ctx netcontext.Context, ns string) netcontext.Context {\n\treturn netcontext.WithValue(ctx, 
&namespaceKey, ns)\n}\n\nfunc NamespaceFromContext(ctx netcontext.Context) string {\n\t// If there's no namespace, return the empty string.\n\tns, _ := ctx.Value(&namespaceKey).(string)\n\treturn ns\n}\n\n// FullyQualifiedAppID returns the fully-qualified application ID.\n// This may contain a partition prefix (e.g. \"s~\" for High Replication apps),\n// or a domain prefix (e.g. \"example.com:\").\nfunc FullyQualifiedAppID(ctx netcontext.Context) string {\n\tif id, ok := ctx.Value(&appIDOverrideKey).(string); ok {\n\t\treturn id\n\t}\n\treturn fullyQualifiedAppID(ctx)\n}\n\nfunc Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {\n\tif f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {\n\t\tf(level, format, args...)\n\t\treturn\n\t}\n\tlogf(fromContext(ctx), level, format, args...)\n}\n\n// NamespacedContext wraps a Context to support namespaces.\nfunc NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {\n\tn := &namespacedContext{\n\t\tnamespace: namespace,\n\t}\n\treturn withNamespace(WithCallOverride(ctx, n.call), namespace)\n}\n\ntype namespacedContext struct {\n\tnamespace string\n}\n\nfunc (n *namespacedContext) call(ctx netcontext.Context, service, method string, in, out proto.Message) error {\n\t// Apply any namespace mods.\n\tif mod, ok := NamespaceMods[service]; ok {\n\t\tmod(in, n.namespace)\n\t}\n\treturn Call(ctx, service, method, in, out)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/app_id.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage internal\n\nimport (\n\t\"strings\"\n)\n\nfunc parseFullAppID(appid string) (partition, domain, displayID string) {\n\tif i := strings.Index(appid, \"~\"); i != -1 {\n\t\tpartition, appid = appid[:i], appid[i+1:]\n\t}\n\tif i := strings.Index(appid, \":\"); i != -1 {\n\t\tdomain, appid = appid[:i], appid[i+1:]\n\t}\n\treturn partition, domain, appid\n}\n\n// appID returns \"appid\" or \"domain.com:appid\".\nfunc appID(fullAppID string) string {\n\t_, dom, dis := parseFullAppID(fullAppID)\n\tif dom != \"\" {\n\t\treturn dom + \":\" + dis\n\t}\n\treturn dis\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto\n// DO NOT EDIT!\n\n/*\nPackage app_identity is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/app_identity/app_identity_service.proto\n\nIt has these top-level messages:\n\tAppIdentityServiceError\n\tSignForAppRequest\n\tSignForAppResponse\n\tGetPublicCertificateForAppRequest\n\tPublicCertificate\n\tGetPublicCertificateForAppResponse\n\tGetServiceAccountNameRequest\n\tGetServiceAccountNameResponse\n\tGetAccessTokenRequest\n\tGetAccessTokenResponse\n\tGetDefaultGcsBucketNameRequest\n\tGetDefaultGcsBucketNameResponse\n*/\npackage app_identity\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype AppIdentityServiceError_ErrorCode int32\n\nconst (\n\tAppIdentityServiceError_SUCCESS           AppIdentityServiceError_ErrorCode = 0\n\tAppIdentityServiceError_UNKNOWN_SCOPE     AppIdentityServiceError_ErrorCode = 9\n\tAppIdentityServiceError_BLOB_TOO_LARGE    AppIdentityServiceError_ErrorCode = 1000\n\tAppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001\n\tAppIdentityServiceError_NOT_A_VALID_APP   AppIdentityServiceError_ErrorCode = 1002\n\tAppIdentityServiceError_UNKNOWN_ERROR     AppIdentityServiceError_ErrorCode = 1003\n\tAppIdentityServiceError_NOT_ALLOWED       AppIdentityServiceError_ErrorCode = 1005\n\tAppIdentityServiceError_NOT_IMPLEMENTED   AppIdentityServiceError_ErrorCode = 1006\n)\n\nvar AppIdentityServiceError_ErrorCode_name = map[int32]string{\n\t0:    \"SUCCESS\",\n\t9:    \"UNKNOWN_SCOPE\",\n\t1000: \"BLOB_TOO_LARGE\",\n\t1001: \"DEADLINE_EXCEEDED\",\n\t1002: \"NOT_A_VALID_APP\",\n\t1003: \"UNKNOWN_ERROR\",\n\t1005: 
\"NOT_ALLOWED\",\n\t1006: \"NOT_IMPLEMENTED\",\n}\nvar AppIdentityServiceError_ErrorCode_value = map[string]int32{\n\t\"SUCCESS\":           0,\n\t\"UNKNOWN_SCOPE\":     9,\n\t\"BLOB_TOO_LARGE\":    1000,\n\t\"DEADLINE_EXCEEDED\": 1001,\n\t\"NOT_A_VALID_APP\":   1002,\n\t\"UNKNOWN_ERROR\":     1003,\n\t\"NOT_ALLOWED\":       1005,\n\t\"NOT_IMPLEMENTED\":   1006,\n}\n\nfunc (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode {\n\tp := new(AppIdentityServiceError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x AppIdentityServiceError_ErrorCode) String() string {\n\treturn proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x))\n}\nfunc (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, \"AppIdentityServiceError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = AppIdentityServiceError_ErrorCode(value)\n\treturn nil\n}\n\ntype AppIdentityServiceError struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *AppIdentityServiceError) Reset()         { *m = AppIdentityServiceError{} }\nfunc (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }\nfunc (*AppIdentityServiceError) ProtoMessage()    {}\n\ntype SignForAppRequest struct {\n\tBytesToSign      []byte `protobuf:\"bytes,1,opt,name=bytes_to_sign\" json:\"bytes_to_sign,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *SignForAppRequest) Reset()         { *m = SignForAppRequest{} }\nfunc (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }\nfunc (*SignForAppRequest) ProtoMessage()    {}\n\nfunc (m *SignForAppRequest) GetBytesToSign() []byte {\n\tif m != nil {\n\t\treturn m.BytesToSign\n\t}\n\treturn nil\n}\n\ntype SignForAppResponse struct {\n\tKeyName          *string `protobuf:\"bytes,1,opt,name=key_name\" json:\"key_name,omitempty\"`\n\tSignatureBytes   []byte  
`protobuf:\"bytes,2,opt,name=signature_bytes\" json:\"signature_bytes,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *SignForAppResponse) Reset()         { *m = SignForAppResponse{} }\nfunc (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }\nfunc (*SignForAppResponse) ProtoMessage()    {}\n\nfunc (m *SignForAppResponse) GetKeyName() string {\n\tif m != nil && m.KeyName != nil {\n\t\treturn *m.KeyName\n\t}\n\treturn \"\"\n}\n\nfunc (m *SignForAppResponse) GetSignatureBytes() []byte {\n\tif m != nil {\n\t\treturn m.SignatureBytes\n\t}\n\treturn nil\n}\n\ntype GetPublicCertificateForAppRequest struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *GetPublicCertificateForAppRequest) Reset()         { *m = GetPublicCertificateForAppRequest{} }\nfunc (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetPublicCertificateForAppRequest) ProtoMessage()    {}\n\ntype PublicCertificate struct {\n\tKeyName            *string `protobuf:\"bytes,1,opt,name=key_name\" json:\"key_name,omitempty\"`\n\tX509CertificatePem *string `protobuf:\"bytes,2,opt,name=x509_certificate_pem\" json:\"x509_certificate_pem,omitempty\"`\n\tXXX_unrecognized   []byte  `json:\"-\"`\n}\n\nfunc (m *PublicCertificate) Reset()         { *m = PublicCertificate{} }\nfunc (m *PublicCertificate) String() string { return proto.CompactTextString(m) }\nfunc (*PublicCertificate) ProtoMessage()    {}\n\nfunc (m *PublicCertificate) GetKeyName() string {\n\tif m != nil && m.KeyName != nil {\n\t\treturn *m.KeyName\n\t}\n\treturn \"\"\n}\n\nfunc (m *PublicCertificate) GetX509CertificatePem() string {\n\tif m != nil && m.X509CertificatePem != nil {\n\t\treturn *m.X509CertificatePem\n\t}\n\treturn \"\"\n}\n\ntype GetPublicCertificateForAppResponse struct {\n\tPublicCertificateList      []*PublicCertificate `protobuf:\"bytes,1,rep,name=public_certificate_list\" 
json:\"public_certificate_list,omitempty\"`\n\tMaxClientCacheTimeInSecond *int64               `protobuf:\"varint,2,opt,name=max_client_cache_time_in_second\" json:\"max_client_cache_time_in_second,omitempty\"`\n\tXXX_unrecognized           []byte               `json:\"-\"`\n}\n\nfunc (m *GetPublicCertificateForAppResponse) Reset()         { *m = GetPublicCertificateForAppResponse{} }\nfunc (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) }\nfunc (*GetPublicCertificateForAppResponse) ProtoMessage()    {}\n\nfunc (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate {\n\tif m != nil {\n\t\treturn m.PublicCertificateList\n\t}\n\treturn nil\n}\n\nfunc (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 {\n\tif m != nil && m.MaxClientCacheTimeInSecond != nil {\n\t\treturn *m.MaxClientCacheTimeInSecond\n\t}\n\treturn 0\n}\n\ntype GetServiceAccountNameRequest struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *GetServiceAccountNameRequest) Reset()         { *m = GetServiceAccountNameRequest{} }\nfunc (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetServiceAccountNameRequest) ProtoMessage()    {}\n\ntype GetServiceAccountNameResponse struct {\n\tServiceAccountName *string `protobuf:\"bytes,1,opt,name=service_account_name\" json:\"service_account_name,omitempty\"`\n\tXXX_unrecognized   []byte  `json:\"-\"`\n}\n\nfunc (m *GetServiceAccountNameResponse) Reset()         { *m = GetServiceAccountNameResponse{} }\nfunc (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }\nfunc (*GetServiceAccountNameResponse) ProtoMessage()    {}\n\nfunc (m *GetServiceAccountNameResponse) GetServiceAccountName() string {\n\tif m != nil && m.ServiceAccountName != nil {\n\t\treturn *m.ServiceAccountName\n\t}\n\treturn \"\"\n}\n\ntype GetAccessTokenRequest struct {\n\tScope              []string 
`protobuf:\"bytes,1,rep,name=scope\" json:\"scope,omitempty\"`\n\tServiceAccountId   *int64   `protobuf:\"varint,2,opt,name=service_account_id\" json:\"service_account_id,omitempty\"`\n\tServiceAccountName *string  `protobuf:\"bytes,3,opt,name=service_account_name\" json:\"service_account_name,omitempty\"`\n\tXXX_unrecognized   []byte   `json:\"-\"`\n}\n\nfunc (m *GetAccessTokenRequest) Reset()         { *m = GetAccessTokenRequest{} }\nfunc (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetAccessTokenRequest) ProtoMessage()    {}\n\nfunc (m *GetAccessTokenRequest) GetScope() []string {\n\tif m != nil {\n\t\treturn m.Scope\n\t}\n\treturn nil\n}\n\nfunc (m *GetAccessTokenRequest) GetServiceAccountId() int64 {\n\tif m != nil && m.ServiceAccountId != nil {\n\t\treturn *m.ServiceAccountId\n\t}\n\treturn 0\n}\n\nfunc (m *GetAccessTokenRequest) GetServiceAccountName() string {\n\tif m != nil && m.ServiceAccountName != nil {\n\t\treturn *m.ServiceAccountName\n\t}\n\treturn \"\"\n}\n\ntype GetAccessTokenResponse struct {\n\tAccessToken      *string `protobuf:\"bytes,1,opt,name=access_token\" json:\"access_token,omitempty\"`\n\tExpirationTime   *int64  `protobuf:\"varint,2,opt,name=expiration_time\" json:\"expiration_time,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *GetAccessTokenResponse) Reset()         { *m = GetAccessTokenResponse{} }\nfunc (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }\nfunc (*GetAccessTokenResponse) ProtoMessage()    {}\n\nfunc (m *GetAccessTokenResponse) GetAccessToken() string {\n\tif m != nil && m.AccessToken != nil {\n\t\treturn *m.AccessToken\n\t}\n\treturn \"\"\n}\n\nfunc (m *GetAccessTokenResponse) GetExpirationTime() int64 {\n\tif m != nil && m.ExpirationTime != nil {\n\t\treturn *m.ExpirationTime\n\t}\n\treturn 0\n}\n\ntype GetDefaultGcsBucketNameRequest struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m 
*GetDefaultGcsBucketNameRequest) Reset()         { *m = GetDefaultGcsBucketNameRequest{} }\nfunc (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetDefaultGcsBucketNameRequest) ProtoMessage()    {}\n\ntype GetDefaultGcsBucketNameResponse struct {\n\tDefaultGcsBucketName *string `protobuf:\"bytes,1,opt,name=default_gcs_bucket_name\" json:\"default_gcs_bucket_name,omitempty\"`\n\tXXX_unrecognized     []byte  `json:\"-\"`\n}\n\nfunc (m *GetDefaultGcsBucketNameResponse) Reset()         { *m = GetDefaultGcsBucketNameResponse{} }\nfunc (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) }\nfunc (*GetDefaultGcsBucketNameResponse) ProtoMessage()    {}\n\nfunc (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string {\n\tif m != nil && m.DefaultGcsBucketName != nil {\n\t\treturn *m.DefaultGcsBucketName\n\t}\n\treturn \"\"\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"app_identity\";\n\npackage appengine;\n\nmessage AppIdentityServiceError {\n  enum ErrorCode {\n    SUCCESS = 0;\n    UNKNOWN_SCOPE = 9;\n    BLOB_TOO_LARGE = 1000;\n    DEADLINE_EXCEEDED = 1001;\n    NOT_A_VALID_APP = 1002;\n    UNKNOWN_ERROR = 1003;\n    NOT_ALLOWED = 1005;\n    NOT_IMPLEMENTED = 1006;\n  }\n}\n\nmessage SignForAppRequest {\n  optional bytes bytes_to_sign = 1;\n}\n\nmessage SignForAppResponse {\n  optional string key_name = 1;\n  optional bytes signature_bytes = 2;\n}\n\nmessage GetPublicCertificateForAppRequest {\n}\n\nmessage PublicCertificate {\n  optional string key_name = 1;\n  optional string x509_certificate_pem = 2;\n}\n\nmessage GetPublicCertificateForAppResponse {\n  repeated PublicCertificate public_certificate_list = 1;\n  optional int64 max_client_cache_time_in_second = 2;\n}\n\nmessage GetServiceAccountNameRequest {\n}\n\nmessage GetServiceAccountNameResponse {\n  optional string service_account_name = 1;\n}\n\nmessage GetAccessTokenRequest {\n  repeated string scope = 1;\n  optional int64 service_account_id = 2;\n  optional string service_account_name = 3;\n}\n\nmessage GetAccessTokenResponse {\n  optional string access_token = 1;\n  optional int64 expiration_time = 2;\n}\n\nmessage GetDefaultGcsBucketNameRequest {\n}\n\nmessage GetDefaultGcsBucketNameResponse {\n  optional string default_gcs_bucket_name = 1;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/base/api_base.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/base/api_base.proto\n// DO NOT EDIT!\n\n/*\nPackage base is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/base/api_base.proto\n\nIt has these top-level messages:\n\tStringProto\n\tInteger32Proto\n\tInteger64Proto\n\tBoolProto\n\tDoubleProto\n\tBytesProto\n\tVoidProto\n*/\npackage base\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype StringProto struct {\n\tValue            *string `protobuf:\"bytes,1,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *StringProto) Reset()         { *m = StringProto{} }\nfunc (m *StringProto) String() string { return proto.CompactTextString(m) }\nfunc (*StringProto) ProtoMessage()    {}\n\nfunc (m *StringProto) GetValue() string {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn \"\"\n}\n\ntype Integer32Proto struct {\n\tValue            *int32 `protobuf:\"varint,1,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Integer32Proto) Reset()         { *m = Integer32Proto{} }\nfunc (m *Integer32Proto) String() string { return proto.CompactTextString(m) }\nfunc (*Integer32Proto) ProtoMessage()    {}\n\nfunc (m *Integer32Proto) GetValue() int32 {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn 0\n}\n\ntype Integer64Proto struct {\n\tValue            *int64 `protobuf:\"varint,1,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Integer64Proto) Reset()         { *m = Integer64Proto{} }\nfunc (m *Integer64Proto) String() string { return proto.CompactTextString(m) }\nfunc (*Integer64Proto) ProtoMessage()    
{}\n\nfunc (m *Integer64Proto) GetValue() int64 {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn 0\n}\n\ntype BoolProto struct {\n\tValue            *bool  `protobuf:\"varint,1,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *BoolProto) Reset()         { *m = BoolProto{} }\nfunc (m *BoolProto) String() string { return proto.CompactTextString(m) }\nfunc (*BoolProto) ProtoMessage()    {}\n\nfunc (m *BoolProto) GetValue() bool {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn false\n}\n\ntype DoubleProto struct {\n\tValue            *float64 `protobuf:\"fixed64,1,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *DoubleProto) Reset()         { *m = DoubleProto{} }\nfunc (m *DoubleProto) String() string { return proto.CompactTextString(m) }\nfunc (*DoubleProto) ProtoMessage()    {}\n\nfunc (m *DoubleProto) GetValue() float64 {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn 0\n}\n\ntype BytesProto struct {\n\tValue            []byte `protobuf:\"bytes,1,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *BytesProto) Reset()         { *m = BytesProto{} }\nfunc (m *BytesProto) String() string { return proto.CompactTextString(m) }\nfunc (*BytesProto) ProtoMessage()    {}\n\nfunc (m *BytesProto) GetValue() []byte {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\ntype VoidProto struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *VoidProto) Reset()         { *m = VoidProto{} }\nfunc (m *VoidProto) String() string { return proto.CompactTextString(m) }\nfunc (*VoidProto) ProtoMessage()    {}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/base/api_base.proto",
    "content": "// Built-in base types for API calls. Primarily useful as return types.\n\nsyntax = \"proto2\";\noption go_package = \"base\";\n\npackage appengine.base;\n\nmessage StringProto {\n  required string value = 1;\n}\n\nmessage Integer32Proto {\n  required int32 value = 1;\n}\n\nmessage Integer64Proto {\n  required int64 value = 1;\n}\n\nmessage BoolProto {\n  required bool value = 1;\n}\n\nmessage DoubleProto {\n  required double value = 1;\n}\n\nmessage BytesProto {\n  required bytes value = 1 [ctype=CORD];\n}\n\nmessage VoidProto {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/blobstore/blobstore_service.proto\n// DO NOT EDIT!\n\n/*\nPackage blobstore is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/blobstore/blobstore_service.proto\n\nIt has these top-level messages:\n\tBlobstoreServiceError\n\tCreateUploadURLRequest\n\tCreateUploadURLResponse\n\tDeleteBlobRequest\n\tFetchDataRequest\n\tFetchDataResponse\n\tCloneBlobRequest\n\tCloneBlobResponse\n\tDecodeBlobKeyRequest\n\tDecodeBlobKeyResponse\n\tCreateEncodedGoogleStorageKeyRequest\n\tCreateEncodedGoogleStorageKeyResponse\n*/\npackage blobstore\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype BlobstoreServiceError_ErrorCode int32\n\nconst (\n\tBlobstoreServiceError_OK                        BlobstoreServiceError_ErrorCode = 0\n\tBlobstoreServiceError_INTERNAL_ERROR            BlobstoreServiceError_ErrorCode = 1\n\tBlobstoreServiceError_URL_TOO_LONG              BlobstoreServiceError_ErrorCode = 2\n\tBlobstoreServiceError_PERMISSION_DENIED         BlobstoreServiceError_ErrorCode = 3\n\tBlobstoreServiceError_BLOB_NOT_FOUND            BlobstoreServiceError_ErrorCode = 4\n\tBlobstoreServiceError_DATA_INDEX_OUT_OF_RANGE   BlobstoreServiceError_ErrorCode = 5\n\tBlobstoreServiceError_BLOB_FETCH_SIZE_TOO_LARGE BlobstoreServiceError_ErrorCode = 6\n\tBlobstoreServiceError_ARGUMENT_OUT_OF_RANGE     BlobstoreServiceError_ErrorCode = 8\n\tBlobstoreServiceError_INVALID_BLOB_KEY          BlobstoreServiceError_ErrorCode = 9\n)\n\nvar BlobstoreServiceError_ErrorCode_name = map[int32]string{\n\t0: \"OK\",\n\t1: \"INTERNAL_ERROR\",\n\t2: \"URL_TOO_LONG\",\n\t3: \"PERMISSION_DENIED\",\n\t4: \"BLOB_NOT_FOUND\",\n\t5: \"DATA_INDEX_OUT_OF_RANGE\",\n\t6: 
\"BLOB_FETCH_SIZE_TOO_LARGE\",\n\t8: \"ARGUMENT_OUT_OF_RANGE\",\n\t9: \"INVALID_BLOB_KEY\",\n}\nvar BlobstoreServiceError_ErrorCode_value = map[string]int32{\n\t\"OK\":                        0,\n\t\"INTERNAL_ERROR\":            1,\n\t\"URL_TOO_LONG\":              2,\n\t\"PERMISSION_DENIED\":         3,\n\t\"BLOB_NOT_FOUND\":            4,\n\t\"DATA_INDEX_OUT_OF_RANGE\":   5,\n\t\"BLOB_FETCH_SIZE_TOO_LARGE\": 6,\n\t\"ARGUMENT_OUT_OF_RANGE\":     8,\n\t\"INVALID_BLOB_KEY\":          9,\n}\n\nfunc (x BlobstoreServiceError_ErrorCode) Enum() *BlobstoreServiceError_ErrorCode {\n\tp := new(BlobstoreServiceError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x BlobstoreServiceError_ErrorCode) String() string {\n\treturn proto.EnumName(BlobstoreServiceError_ErrorCode_name, int32(x))\n}\nfunc (x *BlobstoreServiceError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(BlobstoreServiceError_ErrorCode_value, data, \"BlobstoreServiceError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = BlobstoreServiceError_ErrorCode(value)\n\treturn nil\n}\n\ntype BlobstoreServiceError struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *BlobstoreServiceError) Reset()         { *m = BlobstoreServiceError{} }\nfunc (m *BlobstoreServiceError) String() string { return proto.CompactTextString(m) }\nfunc (*BlobstoreServiceError) ProtoMessage()    {}\n\ntype CreateUploadURLRequest struct {\n\tSuccessPath               *string `protobuf:\"bytes,1,req,name=success_path\" json:\"success_path,omitempty\"`\n\tMaxUploadSizeBytes        *int64  `protobuf:\"varint,2,opt,name=max_upload_size_bytes\" json:\"max_upload_size_bytes,omitempty\"`\n\tMaxUploadSizePerBlobBytes *int64  `protobuf:\"varint,3,opt,name=max_upload_size_per_blob_bytes\" json:\"max_upload_size_per_blob_bytes,omitempty\"`\n\tGsBucketName              *string `protobuf:\"bytes,4,opt,name=gs_bucket_name\" json:\"gs_bucket_name,omitempty\"`\n\tUrlExpiryTimeSeconds      *int32  
`protobuf:\"varint,5,opt,name=url_expiry_time_seconds\" json:\"url_expiry_time_seconds,omitempty\"`\n\tXXX_unrecognized          []byte  `json:\"-\"`\n}\n\nfunc (m *CreateUploadURLRequest) Reset()         { *m = CreateUploadURLRequest{} }\nfunc (m *CreateUploadURLRequest) String() string { return proto.CompactTextString(m) }\nfunc (*CreateUploadURLRequest) ProtoMessage()    {}\n\nfunc (m *CreateUploadURLRequest) GetSuccessPath() string {\n\tif m != nil && m.SuccessPath != nil {\n\t\treturn *m.SuccessPath\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateUploadURLRequest) GetMaxUploadSizeBytes() int64 {\n\tif m != nil && m.MaxUploadSizeBytes != nil {\n\t\treturn *m.MaxUploadSizeBytes\n\t}\n\treturn 0\n}\n\nfunc (m *CreateUploadURLRequest) GetMaxUploadSizePerBlobBytes() int64 {\n\tif m != nil && m.MaxUploadSizePerBlobBytes != nil {\n\t\treturn *m.MaxUploadSizePerBlobBytes\n\t}\n\treturn 0\n}\n\nfunc (m *CreateUploadURLRequest) GetGsBucketName() string {\n\tif m != nil && m.GsBucketName != nil {\n\t\treturn *m.GsBucketName\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateUploadURLRequest) GetUrlExpiryTimeSeconds() int32 {\n\tif m != nil && m.UrlExpiryTimeSeconds != nil {\n\t\treturn *m.UrlExpiryTimeSeconds\n\t}\n\treturn 0\n}\n\ntype CreateUploadURLResponse struct {\n\tUrl              *string `protobuf:\"bytes,1,req,name=url\" json:\"url,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CreateUploadURLResponse) Reset()         { *m = CreateUploadURLResponse{} }\nfunc (m *CreateUploadURLResponse) String() string { return proto.CompactTextString(m) }\nfunc (*CreateUploadURLResponse) ProtoMessage()    {}\n\nfunc (m *CreateUploadURLResponse) GetUrl() string {\n\tif m != nil && m.Url != nil {\n\t\treturn *m.Url\n\t}\n\treturn \"\"\n}\n\ntype DeleteBlobRequest struct {\n\tBlobKey          []string `protobuf:\"bytes,1,rep,name=blob_key\" json:\"blob_key,omitempty\"`\n\tToken            *string  `protobuf:\"bytes,2,opt,name=token\" 
json:\"token,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *DeleteBlobRequest) Reset()         { *m = DeleteBlobRequest{} }\nfunc (m *DeleteBlobRequest) String() string { return proto.CompactTextString(m) }\nfunc (*DeleteBlobRequest) ProtoMessage()    {}\n\nfunc (m *DeleteBlobRequest) GetBlobKey() []string {\n\tif m != nil {\n\t\treturn m.BlobKey\n\t}\n\treturn nil\n}\n\nfunc (m *DeleteBlobRequest) GetToken() string {\n\tif m != nil && m.Token != nil {\n\t\treturn *m.Token\n\t}\n\treturn \"\"\n}\n\ntype FetchDataRequest struct {\n\tBlobKey          *string `protobuf:\"bytes,1,req,name=blob_key\" json:\"blob_key,omitempty\"`\n\tStartIndex       *int64  `protobuf:\"varint,2,req,name=start_index\" json:\"start_index,omitempty\"`\n\tEndIndex         *int64  `protobuf:\"varint,3,req,name=end_index\" json:\"end_index,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *FetchDataRequest) Reset()         { *m = FetchDataRequest{} }\nfunc (m *FetchDataRequest) String() string { return proto.CompactTextString(m) }\nfunc (*FetchDataRequest) ProtoMessage()    {}\n\nfunc (m *FetchDataRequest) GetBlobKey() string {\n\tif m != nil && m.BlobKey != nil {\n\t\treturn *m.BlobKey\n\t}\n\treturn \"\"\n}\n\nfunc (m *FetchDataRequest) GetStartIndex() int64 {\n\tif m != nil && m.StartIndex != nil {\n\t\treturn *m.StartIndex\n\t}\n\treturn 0\n}\n\nfunc (m *FetchDataRequest) GetEndIndex() int64 {\n\tif m != nil && m.EndIndex != nil {\n\t\treturn *m.EndIndex\n\t}\n\treturn 0\n}\n\ntype FetchDataResponse struct {\n\tData             []byte `protobuf:\"bytes,1000,req,name=data\" json:\"data,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *FetchDataResponse) Reset()         { *m = FetchDataResponse{} }\nfunc (m *FetchDataResponse) String() string { return proto.CompactTextString(m) }\nfunc (*FetchDataResponse) ProtoMessage()    {}\n\nfunc (m *FetchDataResponse) GetData() []byte {\n\tif m != nil {\n\t\treturn m.Data\n\t}\n\treturn 
nil\n}\n\ntype CloneBlobRequest struct {\n\tBlobKey          []byte `protobuf:\"bytes,1,req,name=blob_key\" json:\"blob_key,omitempty\"`\n\tMimeType         []byte `protobuf:\"bytes,2,req,name=mime_type\" json:\"mime_type,omitempty\"`\n\tTargetAppId      []byte `protobuf:\"bytes,3,req,name=target_app_id\" json:\"target_app_id,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *CloneBlobRequest) Reset()         { *m = CloneBlobRequest{} }\nfunc (m *CloneBlobRequest) String() string { return proto.CompactTextString(m) }\nfunc (*CloneBlobRequest) ProtoMessage()    {}\n\nfunc (m *CloneBlobRequest) GetBlobKey() []byte {\n\tif m != nil {\n\t\treturn m.BlobKey\n\t}\n\treturn nil\n}\n\nfunc (m *CloneBlobRequest) GetMimeType() []byte {\n\tif m != nil {\n\t\treturn m.MimeType\n\t}\n\treturn nil\n}\n\nfunc (m *CloneBlobRequest) GetTargetAppId() []byte {\n\tif m != nil {\n\t\treturn m.TargetAppId\n\t}\n\treturn nil\n}\n\ntype CloneBlobResponse struct {\n\tBlobKey          []byte `protobuf:\"bytes,1,req,name=blob_key\" json:\"blob_key,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *CloneBlobResponse) Reset()         { *m = CloneBlobResponse{} }\nfunc (m *CloneBlobResponse) String() string { return proto.CompactTextString(m) }\nfunc (*CloneBlobResponse) ProtoMessage()    {}\n\nfunc (m *CloneBlobResponse) GetBlobKey() []byte {\n\tif m != nil {\n\t\treturn m.BlobKey\n\t}\n\treturn nil\n}\n\ntype DecodeBlobKeyRequest struct {\n\tBlobKey          []string `protobuf:\"bytes,1,rep,name=blob_key\" json:\"blob_key,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *DecodeBlobKeyRequest) Reset()         { *m = DecodeBlobKeyRequest{} }\nfunc (m *DecodeBlobKeyRequest) String() string { return proto.CompactTextString(m) }\nfunc (*DecodeBlobKeyRequest) ProtoMessage()    {}\n\nfunc (m *DecodeBlobKeyRequest) GetBlobKey() []string {\n\tif m != nil {\n\t\treturn m.BlobKey\n\t}\n\treturn nil\n}\n\ntype DecodeBlobKeyResponse struct {\n\tDecoded 
         []string `protobuf:\"bytes,1,rep,name=decoded\" json:\"decoded,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *DecodeBlobKeyResponse) Reset()         { *m = DecodeBlobKeyResponse{} }\nfunc (m *DecodeBlobKeyResponse) String() string { return proto.CompactTextString(m) }\nfunc (*DecodeBlobKeyResponse) ProtoMessage()    {}\n\nfunc (m *DecodeBlobKeyResponse) GetDecoded() []string {\n\tif m != nil {\n\t\treturn m.Decoded\n\t}\n\treturn nil\n}\n\ntype CreateEncodedGoogleStorageKeyRequest struct {\n\tFilename         *string `protobuf:\"bytes,1,req,name=filename\" json:\"filename,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CreateEncodedGoogleStorageKeyRequest) Reset()         { *m = CreateEncodedGoogleStorageKeyRequest{} }\nfunc (m *CreateEncodedGoogleStorageKeyRequest) String() string { return proto.CompactTextString(m) }\nfunc (*CreateEncodedGoogleStorageKeyRequest) ProtoMessage()    {}\n\nfunc (m *CreateEncodedGoogleStorageKeyRequest) GetFilename() string {\n\tif m != nil && m.Filename != nil {\n\t\treturn *m.Filename\n\t}\n\treturn \"\"\n}\n\ntype CreateEncodedGoogleStorageKeyResponse struct {\n\tBlobKey          *string `protobuf:\"bytes,1,req,name=blob_key\" json:\"blob_key,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CreateEncodedGoogleStorageKeyResponse) Reset()         { *m = CreateEncodedGoogleStorageKeyResponse{} }\nfunc (m *CreateEncodedGoogleStorageKeyResponse) String() string { return proto.CompactTextString(m) }\nfunc (*CreateEncodedGoogleStorageKeyResponse) ProtoMessage()    {}\n\nfunc (m *CreateEncodedGoogleStorageKeyResponse) GetBlobKey() string {\n\tif m != nil && m.BlobKey != nil {\n\t\treturn *m.BlobKey\n\t}\n\treturn \"\"\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"blobstore\";\n\npackage appengine;\n\nmessage BlobstoreServiceError {\n  enum ErrorCode {\n    OK = 0;\n    INTERNAL_ERROR = 1;\n    URL_TOO_LONG = 2;\n    PERMISSION_DENIED = 3;\n    BLOB_NOT_FOUND = 4;\n    DATA_INDEX_OUT_OF_RANGE = 5;\n    BLOB_FETCH_SIZE_TOO_LARGE = 6;\n    ARGUMENT_OUT_OF_RANGE = 8;\n    INVALID_BLOB_KEY = 9;\n  }\n}\n\nmessage CreateUploadURLRequest {\n  required string success_path = 1;\n  optional int64 max_upload_size_bytes = 2;\n  optional int64 max_upload_size_per_blob_bytes = 3;\n  optional string gs_bucket_name = 4;\n  optional int32 url_expiry_time_seconds = 5;\n}\n\nmessage CreateUploadURLResponse {\n  required string url = 1;\n}\n\nmessage DeleteBlobRequest {\n  repeated string blob_key = 1;\n  optional string token = 2;\n}\n\nmessage FetchDataRequest {\n  required string blob_key = 1;\n  required int64 start_index = 2;\n  required int64 end_index = 3;\n}\n\nmessage FetchDataResponse {\n  required bytes data = 1000 [ctype = CORD];\n}\n\nmessage CloneBlobRequest {\n  required bytes blob_key = 1;\n  required bytes mime_type = 2;\n  required bytes target_app_id = 3;\n}\n\nmessage CloneBlobResponse {\n  required bytes blob_key = 1;\n}\n\nmessage DecodeBlobKeyRequest {\n  repeated string blob_key = 1;\n}\n\nmessage DecodeBlobKeyResponse {\n  repeated string decoded = 1;\n}\n\nmessage CreateEncodedGoogleStorageKeyRequest {\n  required string filename = 1;\n}\n\nmessage CreateEncodedGoogleStorageKeyResponse {\n  required string blob_key = 1;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/capability/capability_service.proto\n// DO NOT EDIT!\n\n/*\nPackage channel is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/capability/capability_service.proto\n\nIt has these top-level messages:\n\tIsEnabledRequest\n\tIsEnabledResponse\n*/\npackage channel\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype IsEnabledResponse_SummaryStatus int32\n\nconst (\n\tIsEnabledResponse_DEFAULT          IsEnabledResponse_SummaryStatus = 0\n\tIsEnabledResponse_ENABLED          IsEnabledResponse_SummaryStatus = 1\n\tIsEnabledResponse_SCHEDULED_FUTURE IsEnabledResponse_SummaryStatus = 2\n\tIsEnabledResponse_SCHEDULED_NOW    IsEnabledResponse_SummaryStatus = 3\n\tIsEnabledResponse_DISABLED         IsEnabledResponse_SummaryStatus = 4\n\tIsEnabledResponse_UNKNOWN          IsEnabledResponse_SummaryStatus = 5\n)\n\nvar IsEnabledResponse_SummaryStatus_name = map[int32]string{\n\t0: \"DEFAULT\",\n\t1: \"ENABLED\",\n\t2: \"SCHEDULED_FUTURE\",\n\t3: \"SCHEDULED_NOW\",\n\t4: \"DISABLED\",\n\t5: \"UNKNOWN\",\n}\nvar IsEnabledResponse_SummaryStatus_value = map[string]int32{\n\t\"DEFAULT\":          0,\n\t\"ENABLED\":          1,\n\t\"SCHEDULED_FUTURE\": 2,\n\t\"SCHEDULED_NOW\":    3,\n\t\"DISABLED\":         4,\n\t\"UNKNOWN\":          5,\n}\n\nfunc (x IsEnabledResponse_SummaryStatus) Enum() *IsEnabledResponse_SummaryStatus {\n\tp := new(IsEnabledResponse_SummaryStatus)\n\t*p = x\n\treturn p\n}\nfunc (x IsEnabledResponse_SummaryStatus) String() string {\n\treturn proto.EnumName(IsEnabledResponse_SummaryStatus_name, int32(x))\n}\nfunc (x *IsEnabledResponse_SummaryStatus) UnmarshalJSON(data []byte) error {\n\tvalue, err := 
proto.UnmarshalJSONEnum(IsEnabledResponse_SummaryStatus_value, data, \"IsEnabledResponse_SummaryStatus\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = IsEnabledResponse_SummaryStatus(value)\n\treturn nil\n}\n\ntype IsEnabledRequest struct {\n\tPackage          *string  `protobuf:\"bytes,1,req,name=package\" json:\"package,omitempty\"`\n\tCapability       []string `protobuf:\"bytes,2,rep,name=capability\" json:\"capability,omitempty\"`\n\tCall             []string `protobuf:\"bytes,3,rep,name=call\" json:\"call,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *IsEnabledRequest) Reset()         { *m = IsEnabledRequest{} }\nfunc (m *IsEnabledRequest) String() string { return proto.CompactTextString(m) }\nfunc (*IsEnabledRequest) ProtoMessage()    {}\n\nfunc (m *IsEnabledRequest) GetPackage() string {\n\tif m != nil && m.Package != nil {\n\t\treturn *m.Package\n\t}\n\treturn \"\"\n}\n\nfunc (m *IsEnabledRequest) GetCapability() []string {\n\tif m != nil {\n\t\treturn m.Capability\n\t}\n\treturn nil\n}\n\nfunc (m *IsEnabledRequest) GetCall() []string {\n\tif m != nil {\n\t\treturn m.Call\n\t}\n\treturn nil\n}\n\ntype IsEnabledResponse struct {\n\tSummaryStatus      *IsEnabledResponse_SummaryStatus `protobuf:\"varint,1,opt,name=summary_status,enum=appengine.IsEnabledResponse_SummaryStatus\" json:\"summary_status,omitempty\"`\n\tTimeUntilScheduled *int64                           `protobuf:\"varint,2,opt,name=time_until_scheduled\" json:\"time_until_scheduled,omitempty\"`\n\tXXX_unrecognized   []byte                           `json:\"-\"`\n}\n\nfunc (m *IsEnabledResponse) Reset()         { *m = IsEnabledResponse{} }\nfunc (m *IsEnabledResponse) String() string { return proto.CompactTextString(m) }\nfunc (*IsEnabledResponse) ProtoMessage()    {}\n\nfunc (m *IsEnabledResponse) GetSummaryStatus() IsEnabledResponse_SummaryStatus {\n\tif m != nil && m.SummaryStatus != nil {\n\t\treturn *m.SummaryStatus\n\t}\n\treturn IsEnabledResponse_DEFAULT\n}\n\nfunc 
(m *IsEnabledResponse) GetTimeUntilScheduled() int64 {\n\tif m != nil && m.TimeUntilScheduled != nil {\n\t\treturn *m.TimeUntilScheduled\n\t}\n\treturn 0\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/capability/capability_service.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"channel\";\n\npackage appengine;\n\nmessage IsEnabledRequest {\n  required string package = 1;\n  repeated string capability = 2;\n  repeated string call = 3;\n}\n\nmessage IsEnabledResponse {\n  enum SummaryStatus {\n    DEFAULT = 0;\n    ENABLED = 1;\n    SCHEDULED_FUTURE = 2;\n    SCHEDULED_NOW = 3;\n    DISABLED = 4;\n    UNKNOWN = 5;\n  }\n  optional SummaryStatus summary_status = 1;\n\n  optional int64 time_until_scheduled = 2;\n}\n\nservice CapabilityService {\n  rpc IsEnabled(IsEnabledRequest) returns (IsEnabledResponse) {};\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/channel/channel_service.proto\n// DO NOT EDIT!\n\n/*\nPackage channel is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/channel/channel_service.proto\n\nIt has these top-level messages:\n\tChannelServiceError\n\tCreateChannelRequest\n\tCreateChannelResponse\n\tSendMessageRequest\n*/\npackage channel\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype ChannelServiceError_ErrorCode int32\n\nconst (\n\tChannelServiceError_OK                             ChannelServiceError_ErrorCode = 0\n\tChannelServiceError_INTERNAL_ERROR                 ChannelServiceError_ErrorCode = 1\n\tChannelServiceError_INVALID_CHANNEL_KEY            ChannelServiceError_ErrorCode = 2\n\tChannelServiceError_BAD_MESSAGE                    ChannelServiceError_ErrorCode = 3\n\tChannelServiceError_INVALID_CHANNEL_TOKEN_DURATION ChannelServiceError_ErrorCode = 4\n\tChannelServiceError_APPID_ALIAS_REQUIRED           ChannelServiceError_ErrorCode = 5\n)\n\nvar ChannelServiceError_ErrorCode_name = map[int32]string{\n\t0: \"OK\",\n\t1: \"INTERNAL_ERROR\",\n\t2: \"INVALID_CHANNEL_KEY\",\n\t3: \"BAD_MESSAGE\",\n\t4: \"INVALID_CHANNEL_TOKEN_DURATION\",\n\t5: \"APPID_ALIAS_REQUIRED\",\n}\nvar ChannelServiceError_ErrorCode_value = map[string]int32{\n\t\"OK\":                             0,\n\t\"INTERNAL_ERROR\":                 1,\n\t\"INVALID_CHANNEL_KEY\":            2,\n\t\"BAD_MESSAGE\":                    3,\n\t\"INVALID_CHANNEL_TOKEN_DURATION\": 4,\n\t\"APPID_ALIAS_REQUIRED\":           5,\n}\n\nfunc (x ChannelServiceError_ErrorCode) Enum() *ChannelServiceError_ErrorCode {\n\tp := new(ChannelServiceError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x 
ChannelServiceError_ErrorCode) String() string {\n\treturn proto.EnumName(ChannelServiceError_ErrorCode_name, int32(x))\n}\nfunc (x *ChannelServiceError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(ChannelServiceError_ErrorCode_value, data, \"ChannelServiceError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = ChannelServiceError_ErrorCode(value)\n\treturn nil\n}\n\ntype ChannelServiceError struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *ChannelServiceError) Reset()         { *m = ChannelServiceError{} }\nfunc (m *ChannelServiceError) String() string { return proto.CompactTextString(m) }\nfunc (*ChannelServiceError) ProtoMessage()    {}\n\ntype CreateChannelRequest struct {\n\tApplicationKey   *string `protobuf:\"bytes,1,req,name=application_key\" json:\"application_key,omitempty\"`\n\tDurationMinutes  *int32  `protobuf:\"varint,2,opt,name=duration_minutes\" json:\"duration_minutes,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CreateChannelRequest) Reset()         { *m = CreateChannelRequest{} }\nfunc (m *CreateChannelRequest) String() string { return proto.CompactTextString(m) }\nfunc (*CreateChannelRequest) ProtoMessage()    {}\n\nfunc (m *CreateChannelRequest) GetApplicationKey() string {\n\tif m != nil && m.ApplicationKey != nil {\n\t\treturn *m.ApplicationKey\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateChannelRequest) GetDurationMinutes() int32 {\n\tif m != nil && m.DurationMinutes != nil {\n\t\treturn *m.DurationMinutes\n\t}\n\treturn 0\n}\n\ntype CreateChannelResponse struct {\n\tToken            *string `protobuf:\"bytes,2,opt,name=token\" json:\"token,omitempty\"`\n\tDurationMinutes  *int32  `protobuf:\"varint,3,opt,name=duration_minutes\" json:\"duration_minutes,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CreateChannelResponse) Reset()         { *m = CreateChannelResponse{} }\nfunc (m *CreateChannelResponse) String() string { return 
proto.CompactTextString(m) }\nfunc (*CreateChannelResponse) ProtoMessage()    {}\n\nfunc (m *CreateChannelResponse) GetToken() string {\n\tif m != nil && m.Token != nil {\n\t\treturn *m.Token\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateChannelResponse) GetDurationMinutes() int32 {\n\tif m != nil && m.DurationMinutes != nil {\n\t\treturn *m.DurationMinutes\n\t}\n\treturn 0\n}\n\ntype SendMessageRequest struct {\n\tApplicationKey   *string `protobuf:\"bytes,1,req,name=application_key\" json:\"application_key,omitempty\"`\n\tMessage          *string `protobuf:\"bytes,2,req,name=message\" json:\"message,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *SendMessageRequest) Reset()         { *m = SendMessageRequest{} }\nfunc (m *SendMessageRequest) String() string { return proto.CompactTextString(m) }\nfunc (*SendMessageRequest) ProtoMessage()    {}\n\nfunc (m *SendMessageRequest) GetApplicationKey() string {\n\tif m != nil && m.ApplicationKey != nil {\n\t\treturn *m.ApplicationKey\n\t}\n\treturn \"\"\n}\n\nfunc (m *SendMessageRequest) GetMessage() string {\n\tif m != nil && m.Message != nil {\n\t\treturn *m.Message\n\t}\n\treturn \"\"\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/channel/channel_service.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"channel\";\n\npackage appengine;\n\nmessage ChannelServiceError {\n  enum ErrorCode {\n    OK = 0;\n    INTERNAL_ERROR = 1;\n    INVALID_CHANNEL_KEY = 2;\n    BAD_MESSAGE = 3;\n    INVALID_CHANNEL_TOKEN_DURATION = 4;\n    APPID_ALIAS_REQUIRED = 5;\n  }\n}\n\nmessage CreateChannelRequest {\n  required string application_key = 1;\n  optional int32 duration_minutes = 2;\n}\n\nmessage CreateChannelResponse {\n  optional string token = 2;\n  optional int32 duration_minutes = 3;\n}\n\nmessage SendMessageRequest {\n  required string application_key = 1;\n  required string message = 2;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto\n// DO NOT EDIT!\n\n/*\nPackage datastore is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/datastore/datastore_v3.proto\n\nIt has these top-level messages:\n\tAction\n\tPropertyValue\n\tProperty\n\tPath\n\tReference\n\tUser\n\tEntityProto\n\tCompositeProperty\n\tIndex\n\tCompositeIndex\n\tIndexPostfix\n\tIndexPosition\n\tSnapshot\n\tInternalHeader\n\tTransaction\n\tQuery\n\tCompiledQuery\n\tCompiledCursor\n\tCursor\n\tError\n\tCost\n\tGetRequest\n\tGetResponse\n\tPutRequest\n\tPutResponse\n\tTouchRequest\n\tTouchResponse\n\tDeleteRequest\n\tDeleteResponse\n\tNextRequest\n\tQueryResult\n\tAllocateIdsRequest\n\tAllocateIdsResponse\n\tCompositeIndices\n\tAddActionsRequest\n\tAddActionsResponse\n\tBeginTransactionRequest\n\tCommitResponse\n*/\npackage datastore\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype Property_Meaning int32\n\nconst (\n\tProperty_NO_MEANING       Property_Meaning = 0\n\tProperty_BLOB             Property_Meaning = 14\n\tProperty_TEXT             Property_Meaning = 15\n\tProperty_BYTESTRING       Property_Meaning = 16\n\tProperty_ATOM_CATEGORY    Property_Meaning = 1\n\tProperty_ATOM_LINK        Property_Meaning = 2\n\tProperty_ATOM_TITLE       Property_Meaning = 3\n\tProperty_ATOM_CONTENT     Property_Meaning = 4\n\tProperty_ATOM_SUMMARY     Property_Meaning = 5\n\tProperty_ATOM_AUTHOR      Property_Meaning = 6\n\tProperty_GD_WHEN          Property_Meaning = 7\n\tProperty_GD_EMAIL         Property_Meaning = 8\n\tProperty_GEORSS_POINT     Property_Meaning = 9\n\tProperty_GD_IM            Property_Meaning = 10\n\tProperty_GD_PHONENUMBER   Property_Meaning = 
11\n\tProperty_GD_POSTALADDRESS Property_Meaning = 12\n\tProperty_GD_RATING        Property_Meaning = 13\n\tProperty_BLOBKEY          Property_Meaning = 17\n\tProperty_ENTITY_PROTO     Property_Meaning = 19\n\tProperty_INDEX_VALUE      Property_Meaning = 18\n)\n\nvar Property_Meaning_name = map[int32]string{\n\t0:  \"NO_MEANING\",\n\t14: \"BLOB\",\n\t15: \"TEXT\",\n\t16: \"BYTESTRING\",\n\t1:  \"ATOM_CATEGORY\",\n\t2:  \"ATOM_LINK\",\n\t3:  \"ATOM_TITLE\",\n\t4:  \"ATOM_CONTENT\",\n\t5:  \"ATOM_SUMMARY\",\n\t6:  \"ATOM_AUTHOR\",\n\t7:  \"GD_WHEN\",\n\t8:  \"GD_EMAIL\",\n\t9:  \"GEORSS_POINT\",\n\t10: \"GD_IM\",\n\t11: \"GD_PHONENUMBER\",\n\t12: \"GD_POSTALADDRESS\",\n\t13: \"GD_RATING\",\n\t17: \"BLOBKEY\",\n\t19: \"ENTITY_PROTO\",\n\t18: \"INDEX_VALUE\",\n}\nvar Property_Meaning_value = map[string]int32{\n\t\"NO_MEANING\":       0,\n\t\"BLOB\":             14,\n\t\"TEXT\":             15,\n\t\"BYTESTRING\":       16,\n\t\"ATOM_CATEGORY\":    1,\n\t\"ATOM_LINK\":        2,\n\t\"ATOM_TITLE\":       3,\n\t\"ATOM_CONTENT\":     4,\n\t\"ATOM_SUMMARY\":     5,\n\t\"ATOM_AUTHOR\":      6,\n\t\"GD_WHEN\":          7,\n\t\"GD_EMAIL\":         8,\n\t\"GEORSS_POINT\":     9,\n\t\"GD_IM\":            10,\n\t\"GD_PHONENUMBER\":   11,\n\t\"GD_POSTALADDRESS\": 12,\n\t\"GD_RATING\":        13,\n\t\"BLOBKEY\":          17,\n\t\"ENTITY_PROTO\":     19,\n\t\"INDEX_VALUE\":      18,\n}\n\nfunc (x Property_Meaning) Enum() *Property_Meaning {\n\tp := new(Property_Meaning)\n\t*p = x\n\treturn p\n}\nfunc (x Property_Meaning) String() string {\n\treturn proto.EnumName(Property_Meaning_name, int32(x))\n}\nfunc (x *Property_Meaning) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, \"Property_Meaning\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = Property_Meaning(value)\n\treturn nil\n}\n\ntype Property_FtsTokenizationOption int32\n\nconst (\n\tProperty_HTML Property_FtsTokenizationOption = 1\n\tProperty_ATOM 
Property_FtsTokenizationOption = 2\n)\n\nvar Property_FtsTokenizationOption_name = map[int32]string{\n\t1: \"HTML\",\n\t2: \"ATOM\",\n}\nvar Property_FtsTokenizationOption_value = map[string]int32{\n\t\"HTML\": 1,\n\t\"ATOM\": 2,\n}\n\nfunc (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {\n\tp := new(Property_FtsTokenizationOption)\n\t*p = x\n\treturn p\n}\nfunc (x Property_FtsTokenizationOption) String() string {\n\treturn proto.EnumName(Property_FtsTokenizationOption_name, int32(x))\n}\nfunc (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, \"Property_FtsTokenizationOption\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = Property_FtsTokenizationOption(value)\n\treturn nil\n}\n\ntype EntityProto_Kind int32\n\nconst (\n\tEntityProto_GD_CONTACT EntityProto_Kind = 1\n\tEntityProto_GD_EVENT   EntityProto_Kind = 2\n\tEntityProto_GD_MESSAGE EntityProto_Kind = 3\n)\n\nvar EntityProto_Kind_name = map[int32]string{\n\t1: \"GD_CONTACT\",\n\t2: \"GD_EVENT\",\n\t3: \"GD_MESSAGE\",\n}\nvar EntityProto_Kind_value = map[string]int32{\n\t\"GD_CONTACT\": 1,\n\t\"GD_EVENT\":   2,\n\t\"GD_MESSAGE\": 3,\n}\n\nfunc (x EntityProto_Kind) Enum() *EntityProto_Kind {\n\tp := new(EntityProto_Kind)\n\t*p = x\n\treturn p\n}\nfunc (x EntityProto_Kind) String() string {\n\treturn proto.EnumName(EntityProto_Kind_name, int32(x))\n}\nfunc (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, \"EntityProto_Kind\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = EntityProto_Kind(value)\n\treturn nil\n}\n\ntype Index_Property_Direction int32\n\nconst (\n\tIndex_Property_ASCENDING  Index_Property_Direction = 1\n\tIndex_Property_DESCENDING Index_Property_Direction = 2\n)\n\nvar Index_Property_Direction_name = map[int32]string{\n\t1: \"ASCENDING\",\n\t2: \"DESCENDING\",\n}\nvar 
Index_Property_Direction_value = map[string]int32{\n\t\"ASCENDING\":  1,\n\t\"DESCENDING\": 2,\n}\n\nfunc (x Index_Property_Direction) Enum() *Index_Property_Direction {\n\tp := new(Index_Property_Direction)\n\t*p = x\n\treturn p\n}\nfunc (x Index_Property_Direction) String() string {\n\treturn proto.EnumName(Index_Property_Direction_name, int32(x))\n}\nfunc (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, \"Index_Property_Direction\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = Index_Property_Direction(value)\n\treturn nil\n}\n\ntype CompositeIndex_State int32\n\nconst (\n\tCompositeIndex_WRITE_ONLY CompositeIndex_State = 1\n\tCompositeIndex_READ_WRITE CompositeIndex_State = 2\n\tCompositeIndex_DELETED    CompositeIndex_State = 3\n\tCompositeIndex_ERROR      CompositeIndex_State = 4\n)\n\nvar CompositeIndex_State_name = map[int32]string{\n\t1: \"WRITE_ONLY\",\n\t2: \"READ_WRITE\",\n\t3: \"DELETED\",\n\t4: \"ERROR\",\n}\nvar CompositeIndex_State_value = map[string]int32{\n\t\"WRITE_ONLY\": 1,\n\t\"READ_WRITE\": 2,\n\t\"DELETED\":    3,\n\t\"ERROR\":      4,\n}\n\nfunc (x CompositeIndex_State) Enum() *CompositeIndex_State {\n\tp := new(CompositeIndex_State)\n\t*p = x\n\treturn p\n}\nfunc (x CompositeIndex_State) String() string {\n\treturn proto.EnumName(CompositeIndex_State_name, int32(x))\n}\nfunc (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, \"CompositeIndex_State\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CompositeIndex_State(value)\n\treturn nil\n}\n\ntype Snapshot_Status int32\n\nconst (\n\tSnapshot_INACTIVE Snapshot_Status = 0\n\tSnapshot_ACTIVE   Snapshot_Status = 1\n)\n\nvar Snapshot_Status_name = map[int32]string{\n\t0: \"INACTIVE\",\n\t1: \"ACTIVE\",\n}\nvar Snapshot_Status_value = map[string]int32{\n\t\"INACTIVE\": 0,\n\t\"ACTIVE\":   1,\n}\n\nfunc (x 
Snapshot_Status) Enum() *Snapshot_Status {\n\tp := new(Snapshot_Status)\n\t*p = x\n\treturn p\n}\nfunc (x Snapshot_Status) String() string {\n\treturn proto.EnumName(Snapshot_Status_name, int32(x))\n}\nfunc (x *Snapshot_Status) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, \"Snapshot_Status\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = Snapshot_Status(value)\n\treturn nil\n}\n\ntype Query_Hint int32\n\nconst (\n\tQuery_ORDER_FIRST    Query_Hint = 1\n\tQuery_ANCESTOR_FIRST Query_Hint = 2\n\tQuery_FILTER_FIRST   Query_Hint = 3\n)\n\nvar Query_Hint_name = map[int32]string{\n\t1: \"ORDER_FIRST\",\n\t2: \"ANCESTOR_FIRST\",\n\t3: \"FILTER_FIRST\",\n}\nvar Query_Hint_value = map[string]int32{\n\t\"ORDER_FIRST\":    1,\n\t\"ANCESTOR_FIRST\": 2,\n\t\"FILTER_FIRST\":   3,\n}\n\nfunc (x Query_Hint) Enum() *Query_Hint {\n\tp := new(Query_Hint)\n\t*p = x\n\treturn p\n}\nfunc (x Query_Hint) String() string {\n\treturn proto.EnumName(Query_Hint_name, int32(x))\n}\nfunc (x *Query_Hint) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, \"Query_Hint\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = Query_Hint(value)\n\treturn nil\n}\n\ntype Query_Filter_Operator int32\n\nconst (\n\tQuery_Filter_LESS_THAN             Query_Filter_Operator = 1\n\tQuery_Filter_LESS_THAN_OR_EQUAL    Query_Filter_Operator = 2\n\tQuery_Filter_GREATER_THAN          Query_Filter_Operator = 3\n\tQuery_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4\n\tQuery_Filter_EQUAL                 Query_Filter_Operator = 5\n\tQuery_Filter_IN                    Query_Filter_Operator = 6\n\tQuery_Filter_EXISTS                Query_Filter_Operator = 7\n)\n\nvar Query_Filter_Operator_name = map[int32]string{\n\t1: \"LESS_THAN\",\n\t2: \"LESS_THAN_OR_EQUAL\",\n\t3: \"GREATER_THAN\",\n\t4: \"GREATER_THAN_OR_EQUAL\",\n\t5: \"EQUAL\",\n\t6: \"IN\",\n\t7: \"EXISTS\",\n}\nvar Query_Filter_Operator_value = 
map[string]int32{\n\t\"LESS_THAN\":             1,\n\t\"LESS_THAN_OR_EQUAL\":    2,\n\t\"GREATER_THAN\":          3,\n\t\"GREATER_THAN_OR_EQUAL\": 4,\n\t\"EQUAL\":                 5,\n\t\"IN\":                    6,\n\t\"EXISTS\":                7,\n}\n\nfunc (x Query_Filter_Operator) Enum() *Query_Filter_Operator {\n\tp := new(Query_Filter_Operator)\n\t*p = x\n\treturn p\n}\nfunc (x Query_Filter_Operator) String() string {\n\treturn proto.EnumName(Query_Filter_Operator_name, int32(x))\n}\nfunc (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, \"Query_Filter_Operator\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = Query_Filter_Operator(value)\n\treturn nil\n}\n\ntype Query_Order_Direction int32\n\nconst (\n\tQuery_Order_ASCENDING  Query_Order_Direction = 1\n\tQuery_Order_DESCENDING Query_Order_Direction = 2\n)\n\nvar Query_Order_Direction_name = map[int32]string{\n\t1: \"ASCENDING\",\n\t2: \"DESCENDING\",\n}\nvar Query_Order_Direction_value = map[string]int32{\n\t\"ASCENDING\":  1,\n\t\"DESCENDING\": 2,\n}\n\nfunc (x Query_Order_Direction) Enum() *Query_Order_Direction {\n\tp := new(Query_Order_Direction)\n\t*p = x\n\treturn p\n}\nfunc (x Query_Order_Direction) String() string {\n\treturn proto.EnumName(Query_Order_Direction_name, int32(x))\n}\nfunc (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, \"Query_Order_Direction\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = Query_Order_Direction(value)\n\treturn nil\n}\n\ntype Error_ErrorCode int32\n\nconst (\n\tError_BAD_REQUEST                  Error_ErrorCode = 1\n\tError_CONCURRENT_TRANSACTION       Error_ErrorCode = 2\n\tError_INTERNAL_ERROR               Error_ErrorCode = 3\n\tError_NEED_INDEX                   Error_ErrorCode = 4\n\tError_TIMEOUT                      Error_ErrorCode = 5\n\tError_PERMISSION_DENIED            
Error_ErrorCode = 6\n\tError_BIGTABLE_ERROR               Error_ErrorCode = 7\n\tError_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8\n\tError_CAPABILITY_DISABLED          Error_ErrorCode = 9\n\tError_TRY_ALTERNATE_BACKEND        Error_ErrorCode = 10\n\tError_SAFE_TIME_TOO_OLD            Error_ErrorCode = 11\n)\n\nvar Error_ErrorCode_name = map[int32]string{\n\t1:  \"BAD_REQUEST\",\n\t2:  \"CONCURRENT_TRANSACTION\",\n\t3:  \"INTERNAL_ERROR\",\n\t4:  \"NEED_INDEX\",\n\t5:  \"TIMEOUT\",\n\t6:  \"PERMISSION_DENIED\",\n\t7:  \"BIGTABLE_ERROR\",\n\t8:  \"COMMITTED_BUT_STILL_APPLYING\",\n\t9:  \"CAPABILITY_DISABLED\",\n\t10: \"TRY_ALTERNATE_BACKEND\",\n\t11: \"SAFE_TIME_TOO_OLD\",\n}\nvar Error_ErrorCode_value = map[string]int32{\n\t\"BAD_REQUEST\":                  1,\n\t\"CONCURRENT_TRANSACTION\":       2,\n\t\"INTERNAL_ERROR\":               3,\n\t\"NEED_INDEX\":                   4,\n\t\"TIMEOUT\":                      5,\n\t\"PERMISSION_DENIED\":            6,\n\t\"BIGTABLE_ERROR\":               7,\n\t\"COMMITTED_BUT_STILL_APPLYING\": 8,\n\t\"CAPABILITY_DISABLED\":          9,\n\t\"TRY_ALTERNATE_BACKEND\":        10,\n\t\"SAFE_TIME_TOO_OLD\":            11,\n}\n\nfunc (x Error_ErrorCode) Enum() *Error_ErrorCode {\n\tp := new(Error_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x Error_ErrorCode) String() string {\n\treturn proto.EnumName(Error_ErrorCode_name, int32(x))\n}\nfunc (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, \"Error_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = Error_ErrorCode(value)\n\treturn nil\n}\n\ntype PutRequest_AutoIdPolicy int32\n\nconst (\n\tPutRequest_CURRENT    PutRequest_AutoIdPolicy = 0\n\tPutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1\n)\n\nvar PutRequest_AutoIdPolicy_name = map[int32]string{\n\t0: \"CURRENT\",\n\t1: \"SEQUENTIAL\",\n}\nvar PutRequest_AutoIdPolicy_value = map[string]int32{\n\t\"CURRENT\":    0,\n\t\"SEQUENTIAL\": 
1,\n}\n\nfunc (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {\n\tp := new(PutRequest_AutoIdPolicy)\n\t*p = x\n\treturn p\n}\nfunc (x PutRequest_AutoIdPolicy) String() string {\n\treturn proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))\n}\nfunc (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, \"PutRequest_AutoIdPolicy\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = PutRequest_AutoIdPolicy(value)\n\treturn nil\n}\n\ntype Action struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Action) Reset()         { *m = Action{} }\nfunc (m *Action) String() string { return proto.CompactTextString(m) }\nfunc (*Action) ProtoMessage()    {}\n\ntype PropertyValue struct {\n\tInt64Value       *int64                        `protobuf:\"varint,1,opt,name=int64Value\" json:\"int64Value,omitempty\"`\n\tBooleanValue     *bool                         `protobuf:\"varint,2,opt,name=booleanValue\" json:\"booleanValue,omitempty\"`\n\tStringValue      *string                       `protobuf:\"bytes,3,opt,name=stringValue\" json:\"stringValue,omitempty\"`\n\tDoubleValue      *float64                      `protobuf:\"fixed64,4,opt,name=doubleValue\" json:\"doubleValue,omitempty\"`\n\tPointvalue       *PropertyValue_PointValue     `protobuf:\"group,5,opt,name=PointValue\" json:\"pointvalue,omitempty\"`\n\tUservalue        *PropertyValue_UserValue      `protobuf:\"group,8,opt,name=UserValue\" json:\"uservalue,omitempty\"`\n\tReferencevalue   *PropertyValue_ReferenceValue `protobuf:\"group,12,opt,name=ReferenceValue\" json:\"referencevalue,omitempty\"`\n\tXXX_unrecognized []byte                        `json:\"-\"`\n}\n\nfunc (m *PropertyValue) Reset()         { *m = PropertyValue{} }\nfunc (m *PropertyValue) String() string { return proto.CompactTextString(m) }\nfunc (*PropertyValue) ProtoMessage()    {}\n\nfunc (m *PropertyValue) GetInt64Value() int64 {\n\tif m != nil && 
m.Int64Value != nil {\n\t\treturn *m.Int64Value\n\t}\n\treturn 0\n}\n\nfunc (m *PropertyValue) GetBooleanValue() bool {\n\tif m != nil && m.BooleanValue != nil {\n\t\treturn *m.BooleanValue\n\t}\n\treturn false\n}\n\nfunc (m *PropertyValue) GetStringValue() string {\n\tif m != nil && m.StringValue != nil {\n\t\treturn *m.StringValue\n\t}\n\treturn \"\"\n}\n\nfunc (m *PropertyValue) GetDoubleValue() float64 {\n\tif m != nil && m.DoubleValue != nil {\n\t\treturn *m.DoubleValue\n\t}\n\treturn 0\n}\n\nfunc (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {\n\tif m != nil {\n\t\treturn m.Pointvalue\n\t}\n\treturn nil\n}\n\nfunc (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {\n\tif m != nil {\n\t\treturn m.Uservalue\n\t}\n\treturn nil\n}\n\nfunc (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {\n\tif m != nil {\n\t\treturn m.Referencevalue\n\t}\n\treturn nil\n}\n\ntype PropertyValue_PointValue struct {\n\tX                *float64 `protobuf:\"fixed64,6,req,name=x\" json:\"x,omitempty\"`\n\tY                *float64 `protobuf:\"fixed64,7,req,name=y\" json:\"y,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *PropertyValue_PointValue) Reset()         { *m = PropertyValue_PointValue{} }\nfunc (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }\nfunc (*PropertyValue_PointValue) ProtoMessage()    {}\n\nfunc (m *PropertyValue_PointValue) GetX() float64 {\n\tif m != nil && m.X != nil {\n\t\treturn *m.X\n\t}\n\treturn 0\n}\n\nfunc (m *PropertyValue_PointValue) GetY() float64 {\n\tif m != nil && m.Y != nil {\n\t\treturn *m.Y\n\t}\n\treturn 0\n}\n\ntype PropertyValue_UserValue struct {\n\tEmail             *string `protobuf:\"bytes,9,req,name=email\" json:\"email,omitempty\"`\n\tAuthDomain        *string `protobuf:\"bytes,10,req,name=auth_domain\" json:\"auth_domain,omitempty\"`\n\tNickname          *string `protobuf:\"bytes,11,opt,name=nickname\" 
json:\"nickname,omitempty\"`\n\tFederatedIdentity *string `protobuf:\"bytes,21,opt,name=federated_identity\" json:\"federated_identity,omitempty\"`\n\tFederatedProvider *string `protobuf:\"bytes,22,opt,name=federated_provider\" json:\"federated_provider,omitempty\"`\n\tXXX_unrecognized  []byte  `json:\"-\"`\n}\n\nfunc (m *PropertyValue_UserValue) Reset()         { *m = PropertyValue_UserValue{} }\nfunc (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }\nfunc (*PropertyValue_UserValue) ProtoMessage()    {}\n\nfunc (m *PropertyValue_UserValue) GetEmail() string {\n\tif m != nil && m.Email != nil {\n\t\treturn *m.Email\n\t}\n\treturn \"\"\n}\n\nfunc (m *PropertyValue_UserValue) GetAuthDomain() string {\n\tif m != nil && m.AuthDomain != nil {\n\t\treturn *m.AuthDomain\n\t}\n\treturn \"\"\n}\n\nfunc (m *PropertyValue_UserValue) GetNickname() string {\n\tif m != nil && m.Nickname != nil {\n\t\treturn *m.Nickname\n\t}\n\treturn \"\"\n}\n\nfunc (m *PropertyValue_UserValue) GetFederatedIdentity() string {\n\tif m != nil && m.FederatedIdentity != nil {\n\t\treturn *m.FederatedIdentity\n\t}\n\treturn \"\"\n}\n\nfunc (m *PropertyValue_UserValue) GetFederatedProvider() string {\n\tif m != nil && m.FederatedProvider != nil {\n\t\treturn *m.FederatedProvider\n\t}\n\treturn \"\"\n}\n\ntype PropertyValue_ReferenceValue struct {\n\tApp              *string                                     `protobuf:\"bytes,13,req,name=app\" json:\"app,omitempty\"`\n\tNameSpace        *string                                     `protobuf:\"bytes,20,opt,name=name_space\" json:\"name_space,omitempty\"`\n\tPathelement      []*PropertyValue_ReferenceValue_PathElement `protobuf:\"group,14,rep,name=PathElement\" json:\"pathelement,omitempty\"`\n\tXXX_unrecognized []byte                                      `json:\"-\"`\n}\n\nfunc (m *PropertyValue_ReferenceValue) Reset()         { *m = PropertyValue_ReferenceValue{} }\nfunc (m *PropertyValue_ReferenceValue) String() 
string { return proto.CompactTextString(m) }\nfunc (*PropertyValue_ReferenceValue) ProtoMessage()    {}\n\nfunc (m *PropertyValue_ReferenceValue) GetApp() string {\n\tif m != nil && m.App != nil {\n\t\treturn *m.App\n\t}\n\treturn \"\"\n}\n\nfunc (m *PropertyValue_ReferenceValue) GetNameSpace() string {\n\tif m != nil && m.NameSpace != nil {\n\t\treturn *m.NameSpace\n\t}\n\treturn \"\"\n}\n\nfunc (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {\n\tif m != nil {\n\t\treturn m.Pathelement\n\t}\n\treturn nil\n}\n\ntype PropertyValue_ReferenceValue_PathElement struct {\n\tType             *string `protobuf:\"bytes,15,req,name=type\" json:\"type,omitempty\"`\n\tId               *int64  `protobuf:\"varint,16,opt,name=id\" json:\"id,omitempty\"`\n\tName             *string `protobuf:\"bytes,17,opt,name=name\" json:\"name,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *PropertyValue_ReferenceValue_PathElement) Reset() {\n\t*m = PropertyValue_ReferenceValue_PathElement{}\n}\nfunc (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }\nfunc (*PropertyValue_ReferenceValue_PathElement) ProtoMessage()    {}\n\nfunc (m *PropertyValue_ReferenceValue_PathElement) GetType() string {\n\tif m != nil && m.Type != nil {\n\t\treturn *m.Type\n\t}\n\treturn \"\"\n}\n\nfunc (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {\n\tif m != nil && m.Id != nil {\n\t\treturn *m.Id\n\t}\n\treturn 0\n}\n\nfunc (m *PropertyValue_ReferenceValue_PathElement) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\ntype Property struct {\n\tMeaning               *Property_Meaning               `protobuf:\"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0\" json:\"meaning,omitempty\"`\n\tMeaningUri            *string                         `protobuf:\"bytes,2,opt,name=meaning_uri\" json:\"meaning_uri,omitempty\"`\n\tName      
            *string                         `protobuf:\"bytes,3,req,name=name\" json:\"name,omitempty\"`\n\tValue                 *PropertyValue                  `protobuf:\"bytes,5,req,name=value\" json:\"value,omitempty\"`\n\tMultiple              *bool                           `protobuf:\"varint,4,req,name=multiple\" json:\"multiple,omitempty\"`\n\tSearchable            *bool                           `protobuf:\"varint,6,opt,name=searchable,def=0\" json:\"searchable,omitempty\"`\n\tFtsTokenizationOption *Property_FtsTokenizationOption `protobuf:\"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption\" json:\"fts_tokenization_option,omitempty\"`\n\tLocale                *string                         `protobuf:\"bytes,9,opt,name=locale,def=en\" json:\"locale,omitempty\"`\n\tXXX_unrecognized      []byte                          `json:\"-\"`\n}\n\nfunc (m *Property) Reset()         { *m = Property{} }\nfunc (m *Property) String() string { return proto.CompactTextString(m) }\nfunc (*Property) ProtoMessage()    {}\n\nconst Default_Property_Meaning Property_Meaning = Property_NO_MEANING\nconst Default_Property_Searchable bool = false\nconst Default_Property_Locale string = \"en\"\n\nfunc (m *Property) GetMeaning() Property_Meaning {\n\tif m != nil && m.Meaning != nil {\n\t\treturn *m.Meaning\n\t}\n\treturn Default_Property_Meaning\n}\n\nfunc (m *Property) GetMeaningUri() string {\n\tif m != nil && m.MeaningUri != nil {\n\t\treturn *m.MeaningUri\n\t}\n\treturn \"\"\n}\n\nfunc (m *Property) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *Property) GetValue() *PropertyValue {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\nfunc (m *Property) GetMultiple() bool {\n\tif m != nil && m.Multiple != nil {\n\t\treturn *m.Multiple\n\t}\n\treturn false\n}\n\nfunc (m *Property) GetSearchable() bool {\n\tif m != nil && m.Searchable != nil {\n\t\treturn 
*m.Searchable\n\t}\n\treturn Default_Property_Searchable\n}\n\nfunc (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {\n\tif m != nil && m.FtsTokenizationOption != nil {\n\t\treturn *m.FtsTokenizationOption\n\t}\n\treturn Property_HTML\n}\n\nfunc (m *Property) GetLocale() string {\n\tif m != nil && m.Locale != nil {\n\t\treturn *m.Locale\n\t}\n\treturn Default_Property_Locale\n}\n\ntype Path struct {\n\tElement          []*Path_Element `protobuf:\"group,1,rep,name=Element\" json:\"element,omitempty\"`\n\tXXX_unrecognized []byte          `json:\"-\"`\n}\n\nfunc (m *Path) Reset()         { *m = Path{} }\nfunc (m *Path) String() string { return proto.CompactTextString(m) }\nfunc (*Path) ProtoMessage()    {}\n\nfunc (m *Path) GetElement() []*Path_Element {\n\tif m != nil {\n\t\treturn m.Element\n\t}\n\treturn nil\n}\n\ntype Path_Element struct {\n\tType             *string `protobuf:\"bytes,2,req,name=type\" json:\"type,omitempty\"`\n\tId               *int64  `protobuf:\"varint,3,opt,name=id\" json:\"id,omitempty\"`\n\tName             *string `protobuf:\"bytes,4,opt,name=name\" json:\"name,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *Path_Element) Reset()         { *m = Path_Element{} }\nfunc (m *Path_Element) String() string { return proto.CompactTextString(m) }\nfunc (*Path_Element) ProtoMessage()    {}\n\nfunc (m *Path_Element) GetType() string {\n\tif m != nil && m.Type != nil {\n\t\treturn *m.Type\n\t}\n\treturn \"\"\n}\n\nfunc (m *Path_Element) GetId() int64 {\n\tif m != nil && m.Id != nil {\n\t\treturn *m.Id\n\t}\n\treturn 0\n}\n\nfunc (m *Path_Element) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\ntype Reference struct {\n\tApp              *string `protobuf:\"bytes,13,req,name=app\" json:\"app,omitempty\"`\n\tNameSpace        *string `protobuf:\"bytes,20,opt,name=name_space\" json:\"name_space,omitempty\"`\n\tPath             *Path   
`protobuf:\"bytes,14,req,name=path\" json:\"path,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *Reference) Reset()         { *m = Reference{} }\nfunc (m *Reference) String() string { return proto.CompactTextString(m) }\nfunc (*Reference) ProtoMessage()    {}\n\nfunc (m *Reference) GetApp() string {\n\tif m != nil && m.App != nil {\n\t\treturn *m.App\n\t}\n\treturn \"\"\n}\n\nfunc (m *Reference) GetNameSpace() string {\n\tif m != nil && m.NameSpace != nil {\n\t\treturn *m.NameSpace\n\t}\n\treturn \"\"\n}\n\nfunc (m *Reference) GetPath() *Path {\n\tif m != nil {\n\t\treturn m.Path\n\t}\n\treturn nil\n}\n\ntype User struct {\n\tEmail             *string `protobuf:\"bytes,1,req,name=email\" json:\"email,omitempty\"`\n\tAuthDomain        *string `protobuf:\"bytes,2,req,name=auth_domain\" json:\"auth_domain,omitempty\"`\n\tNickname          *string `protobuf:\"bytes,3,opt,name=nickname\" json:\"nickname,omitempty\"`\n\tFederatedIdentity *string `protobuf:\"bytes,6,opt,name=federated_identity\" json:\"federated_identity,omitempty\"`\n\tFederatedProvider *string `protobuf:\"bytes,7,opt,name=federated_provider\" json:\"federated_provider,omitempty\"`\n\tXXX_unrecognized  []byte  `json:\"-\"`\n}\n\nfunc (m *User) Reset()         { *m = User{} }\nfunc (m *User) String() string { return proto.CompactTextString(m) }\nfunc (*User) ProtoMessage()    {}\n\nfunc (m *User) GetEmail() string {\n\tif m != nil && m.Email != nil {\n\t\treturn *m.Email\n\t}\n\treturn \"\"\n}\n\nfunc (m *User) GetAuthDomain() string {\n\tif m != nil && m.AuthDomain != nil {\n\t\treturn *m.AuthDomain\n\t}\n\treturn \"\"\n}\n\nfunc (m *User) GetNickname() string {\n\tif m != nil && m.Nickname != nil {\n\t\treturn *m.Nickname\n\t}\n\treturn \"\"\n}\n\nfunc (m *User) GetFederatedIdentity() string {\n\tif m != nil && m.FederatedIdentity != nil {\n\t\treturn *m.FederatedIdentity\n\t}\n\treturn \"\"\n}\n\nfunc (m *User) GetFederatedProvider() string {\n\tif m != nil && m.FederatedProvider 
!= nil {\n\t\treturn *m.FederatedProvider\n\t}\n\treturn \"\"\n}\n\ntype EntityProto struct {\n\tKey              *Reference        `protobuf:\"bytes,13,req,name=key\" json:\"key,omitempty\"`\n\tEntityGroup      *Path             `protobuf:\"bytes,16,req,name=entity_group\" json:\"entity_group,omitempty\"`\n\tOwner            *User             `protobuf:\"bytes,17,opt,name=owner\" json:\"owner,omitempty\"`\n\tKind             *EntityProto_Kind `protobuf:\"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind\" json:\"kind,omitempty\"`\n\tKindUri          *string           `protobuf:\"bytes,5,opt,name=kind_uri\" json:\"kind_uri,omitempty\"`\n\tProperty         []*Property       `protobuf:\"bytes,14,rep,name=property\" json:\"property,omitempty\"`\n\tRawProperty      []*Property       `protobuf:\"bytes,15,rep,name=raw_property\" json:\"raw_property,omitempty\"`\n\tRank             *int32            `protobuf:\"varint,18,opt,name=rank\" json:\"rank,omitempty\"`\n\tXXX_unrecognized []byte            `json:\"-\"`\n}\n\nfunc (m *EntityProto) Reset()         { *m = EntityProto{} }\nfunc (m *EntityProto) String() string { return proto.CompactTextString(m) }\nfunc (*EntityProto) ProtoMessage()    {}\n\nfunc (m *EntityProto) GetKey() *Reference {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *EntityProto) GetEntityGroup() *Path {\n\tif m != nil {\n\t\treturn m.EntityGroup\n\t}\n\treturn nil\n}\n\nfunc (m *EntityProto) GetOwner() *User {\n\tif m != nil {\n\t\treturn m.Owner\n\t}\n\treturn nil\n}\n\nfunc (m *EntityProto) GetKind() EntityProto_Kind {\n\tif m != nil && m.Kind != nil {\n\t\treturn *m.Kind\n\t}\n\treturn EntityProto_GD_CONTACT\n}\n\nfunc (m *EntityProto) GetKindUri() string {\n\tif m != nil && m.KindUri != nil {\n\t\treturn *m.KindUri\n\t}\n\treturn \"\"\n}\n\nfunc (m *EntityProto) GetProperty() []*Property {\n\tif m != nil {\n\t\treturn m.Property\n\t}\n\treturn nil\n}\n\nfunc (m *EntityProto) GetRawProperty() []*Property {\n\tif m != nil 
{\n\t\treturn m.RawProperty\n\t}\n\treturn nil\n}\n\nfunc (m *EntityProto) GetRank() int32 {\n\tif m != nil && m.Rank != nil {\n\t\treturn *m.Rank\n\t}\n\treturn 0\n}\n\ntype CompositeProperty struct {\n\tIndexId          *int64   `protobuf:\"varint,1,req,name=index_id\" json:\"index_id,omitempty\"`\n\tValue            []string `protobuf:\"bytes,2,rep,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *CompositeProperty) Reset()         { *m = CompositeProperty{} }\nfunc (m *CompositeProperty) String() string { return proto.CompactTextString(m) }\nfunc (*CompositeProperty) ProtoMessage()    {}\n\nfunc (m *CompositeProperty) GetIndexId() int64 {\n\tif m != nil && m.IndexId != nil {\n\t\treturn *m.IndexId\n\t}\n\treturn 0\n}\n\nfunc (m *CompositeProperty) GetValue() []string {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\ntype Index struct {\n\tEntityType       *string           `protobuf:\"bytes,1,req,name=entity_type\" json:\"entity_type,omitempty\"`\n\tAncestor         *bool             `protobuf:\"varint,5,req,name=ancestor\" json:\"ancestor,omitempty\"`\n\tProperty         []*Index_Property `protobuf:\"group,2,rep,name=Property\" json:\"property,omitempty\"`\n\tXXX_unrecognized []byte            `json:\"-\"`\n}\n\nfunc (m *Index) Reset()         { *m = Index{} }\nfunc (m *Index) String() string { return proto.CompactTextString(m) }\nfunc (*Index) ProtoMessage()    {}\n\nfunc (m *Index) GetEntityType() string {\n\tif m != nil && m.EntityType != nil {\n\t\treturn *m.EntityType\n\t}\n\treturn \"\"\n}\n\nfunc (m *Index) GetAncestor() bool {\n\tif m != nil && m.Ancestor != nil {\n\t\treturn *m.Ancestor\n\t}\n\treturn false\n}\n\nfunc (m *Index) GetProperty() []*Index_Property {\n\tif m != nil {\n\t\treturn m.Property\n\t}\n\treturn nil\n}\n\ntype Index_Property struct {\n\tName             *string                   `protobuf:\"bytes,3,req,name=name\" json:\"name,omitempty\"`\n\tDirection        
*Index_Property_Direction `protobuf:\"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1\" json:\"direction,omitempty\"`\n\tXXX_unrecognized []byte                    `json:\"-\"`\n}\n\nfunc (m *Index_Property) Reset()         { *m = Index_Property{} }\nfunc (m *Index_Property) String() string { return proto.CompactTextString(m) }\nfunc (*Index_Property) ProtoMessage()    {}\n\nconst Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING\n\nfunc (m *Index_Property) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *Index_Property) GetDirection() Index_Property_Direction {\n\tif m != nil && m.Direction != nil {\n\t\treturn *m.Direction\n\t}\n\treturn Default_Index_Property_Direction\n}\n\ntype CompositeIndex struct {\n\tAppId             *string               `protobuf:\"bytes,1,req,name=app_id\" json:\"app_id,omitempty\"`\n\tId                *int64                `protobuf:\"varint,2,req,name=id\" json:\"id,omitempty\"`\n\tDefinition        *Index                `protobuf:\"bytes,3,req,name=definition\" json:\"definition,omitempty\"`\n\tState             *CompositeIndex_State `protobuf:\"varint,4,req,name=state,enum=appengine.CompositeIndex_State\" json:\"state,omitempty\"`\n\tOnlyUseIfRequired *bool                 `protobuf:\"varint,6,opt,name=only_use_if_required,def=0\" json:\"only_use_if_required,omitempty\"`\n\tXXX_unrecognized  []byte                `json:\"-\"`\n}\n\nfunc (m *CompositeIndex) Reset()         { *m = CompositeIndex{} }\nfunc (m *CompositeIndex) String() string { return proto.CompactTextString(m) }\nfunc (*CompositeIndex) ProtoMessage()    {}\n\nconst Default_CompositeIndex_OnlyUseIfRequired bool = false\n\nfunc (m *CompositeIndex) GetAppId() string {\n\tif m != nil && m.AppId != nil {\n\t\treturn *m.AppId\n\t}\n\treturn \"\"\n}\n\nfunc (m *CompositeIndex) GetId() int64 {\n\tif m != nil && m.Id != nil {\n\t\treturn *m.Id\n\t}\n\treturn 
0\n}\n\nfunc (m *CompositeIndex) GetDefinition() *Index {\n\tif m != nil {\n\t\treturn m.Definition\n\t}\n\treturn nil\n}\n\nfunc (m *CompositeIndex) GetState() CompositeIndex_State {\n\tif m != nil && m.State != nil {\n\t\treturn *m.State\n\t}\n\treturn CompositeIndex_WRITE_ONLY\n}\n\nfunc (m *CompositeIndex) GetOnlyUseIfRequired() bool {\n\tif m != nil && m.OnlyUseIfRequired != nil {\n\t\treturn *m.OnlyUseIfRequired\n\t}\n\treturn Default_CompositeIndex_OnlyUseIfRequired\n}\n\ntype IndexPostfix struct {\n\tIndexValue       []*IndexPostfix_IndexValue `protobuf:\"bytes,1,rep,name=index_value\" json:\"index_value,omitempty\"`\n\tKey              *Reference                 `protobuf:\"bytes,2,opt,name=key\" json:\"key,omitempty\"`\n\tBefore           *bool                      `protobuf:\"varint,3,opt,name=before,def=1\" json:\"before,omitempty\"`\n\tXXX_unrecognized []byte                     `json:\"-\"`\n}\n\nfunc (m *IndexPostfix) Reset()         { *m = IndexPostfix{} }\nfunc (m *IndexPostfix) String() string { return proto.CompactTextString(m) }\nfunc (*IndexPostfix) ProtoMessage()    {}\n\nconst Default_IndexPostfix_Before bool = true\n\nfunc (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {\n\tif m != nil {\n\t\treturn m.IndexValue\n\t}\n\treturn nil\n}\n\nfunc (m *IndexPostfix) GetKey() *Reference {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *IndexPostfix) GetBefore() bool {\n\tif m != nil && m.Before != nil {\n\t\treturn *m.Before\n\t}\n\treturn Default_IndexPostfix_Before\n}\n\ntype IndexPostfix_IndexValue struct {\n\tPropertyName     *string        `protobuf:\"bytes,1,req,name=property_name\" json:\"property_name,omitempty\"`\n\tValue            *PropertyValue `protobuf:\"bytes,2,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte         `json:\"-\"`\n}\n\nfunc (m *IndexPostfix_IndexValue) Reset()         { *m = IndexPostfix_IndexValue{} }\nfunc (m *IndexPostfix_IndexValue) String() string { return 
proto.CompactTextString(m) }\nfunc (*IndexPostfix_IndexValue) ProtoMessage()    {}\n\nfunc (m *IndexPostfix_IndexValue) GetPropertyName() string {\n\tif m != nil && m.PropertyName != nil {\n\t\treturn *m.PropertyName\n\t}\n\treturn \"\"\n}\n\nfunc (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\ntype IndexPosition struct {\n\tKey              *string `protobuf:\"bytes,1,opt,name=key\" json:\"key,omitempty\"`\n\tBefore           *bool   `protobuf:\"varint,2,opt,name=before,def=1\" json:\"before,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *IndexPosition) Reset()         { *m = IndexPosition{} }\nfunc (m *IndexPosition) String() string { return proto.CompactTextString(m) }\nfunc (*IndexPosition) ProtoMessage()    {}\n\nconst Default_IndexPosition_Before bool = true\n\nfunc (m *IndexPosition) GetKey() string {\n\tif m != nil && m.Key != nil {\n\t\treturn *m.Key\n\t}\n\treturn \"\"\n}\n\nfunc (m *IndexPosition) GetBefore() bool {\n\tif m != nil && m.Before != nil {\n\t\treturn *m.Before\n\t}\n\treturn Default_IndexPosition_Before\n}\n\ntype Snapshot struct {\n\tTs               *int64 `protobuf:\"varint,1,req,name=ts\" json:\"ts,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Snapshot) Reset()         { *m = Snapshot{} }\nfunc (m *Snapshot) String() string { return proto.CompactTextString(m) }\nfunc (*Snapshot) ProtoMessage()    {}\n\nfunc (m *Snapshot) GetTs() int64 {\n\tif m != nil && m.Ts != nil {\n\t\treturn *m.Ts\n\t}\n\treturn 0\n}\n\ntype InternalHeader struct {\n\tQos              *string `protobuf:\"bytes,1,opt,name=qos\" json:\"qos,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *InternalHeader) Reset()         { *m = InternalHeader{} }\nfunc (m *InternalHeader) String() string { return proto.CompactTextString(m) }\nfunc (*InternalHeader) ProtoMessage()    {}\n\nfunc (m *InternalHeader) GetQos() string {\n\tif m != nil && m.Qos 
!= nil {\n\t\treturn *m.Qos\n\t}\n\treturn \"\"\n}\n\ntype Transaction struct {\n\tHeader           *InternalHeader `protobuf:\"bytes,4,opt,name=header\" json:\"header,omitempty\"`\n\tHandle           *uint64         `protobuf:\"fixed64,1,req,name=handle\" json:\"handle,omitempty\"`\n\tApp              *string         `protobuf:\"bytes,2,req,name=app\" json:\"app,omitempty\"`\n\tMarkChanges      *bool           `protobuf:\"varint,3,opt,name=mark_changes,def=0\" json:\"mark_changes,omitempty\"`\n\tXXX_unrecognized []byte          `json:\"-\"`\n}\n\nfunc (m *Transaction) Reset()         { *m = Transaction{} }\nfunc (m *Transaction) String() string { return proto.CompactTextString(m) }\nfunc (*Transaction) ProtoMessage()    {}\n\nconst Default_Transaction_MarkChanges bool = false\n\nfunc (m *Transaction) GetHeader() *InternalHeader {\n\tif m != nil {\n\t\treturn m.Header\n\t}\n\treturn nil\n}\n\nfunc (m *Transaction) GetHandle() uint64 {\n\tif m != nil && m.Handle != nil {\n\t\treturn *m.Handle\n\t}\n\treturn 0\n}\n\nfunc (m *Transaction) GetApp() string {\n\tif m != nil && m.App != nil {\n\t\treturn *m.App\n\t}\n\treturn \"\"\n}\n\nfunc (m *Transaction) GetMarkChanges() bool {\n\tif m != nil && m.MarkChanges != nil {\n\t\treturn *m.MarkChanges\n\t}\n\treturn Default_Transaction_MarkChanges\n}\n\ntype Query struct {\n\tHeader              *InternalHeader   `protobuf:\"bytes,39,opt,name=header\" json:\"header,omitempty\"`\n\tApp                 *string           `protobuf:\"bytes,1,req,name=app\" json:\"app,omitempty\"`\n\tNameSpace           *string           `protobuf:\"bytes,29,opt,name=name_space\" json:\"name_space,omitempty\"`\n\tKind                *string           `protobuf:\"bytes,3,opt,name=kind\" json:\"kind,omitempty\"`\n\tAncestor            *Reference        `protobuf:\"bytes,17,opt,name=ancestor\" json:\"ancestor,omitempty\"`\n\tFilter              []*Query_Filter   `protobuf:\"group,4,rep,name=Filter\" json:\"filter,omitempty\"`\n\tSearchQuery         
*string           `protobuf:\"bytes,8,opt,name=search_query\" json:\"search_query,omitempty\"`\n\tOrder               []*Query_Order    `protobuf:\"group,9,rep,name=Order\" json:\"order,omitempty\"`\n\tHint                *Query_Hint       `protobuf:\"varint,18,opt,name=hint,enum=appengine.Query_Hint\" json:\"hint,omitempty\"`\n\tCount               *int32            `protobuf:\"varint,23,opt,name=count\" json:\"count,omitempty\"`\n\tOffset              *int32            `protobuf:\"varint,12,opt,name=offset,def=0\" json:\"offset,omitempty\"`\n\tLimit               *int32            `protobuf:\"varint,16,opt,name=limit\" json:\"limit,omitempty\"`\n\tCompiledCursor      *CompiledCursor   `protobuf:\"bytes,30,opt,name=compiled_cursor\" json:\"compiled_cursor,omitempty\"`\n\tEndCompiledCursor   *CompiledCursor   `protobuf:\"bytes,31,opt,name=end_compiled_cursor\" json:\"end_compiled_cursor,omitempty\"`\n\tCompositeIndex      []*CompositeIndex `protobuf:\"bytes,19,rep,name=composite_index\" json:\"composite_index,omitempty\"`\n\tRequirePerfectPlan  *bool             `protobuf:\"varint,20,opt,name=require_perfect_plan,def=0\" json:\"require_perfect_plan,omitempty\"`\n\tKeysOnly            *bool             `protobuf:\"varint,21,opt,name=keys_only,def=0\" json:\"keys_only,omitempty\"`\n\tTransaction         *Transaction      `protobuf:\"bytes,22,opt,name=transaction\" json:\"transaction,omitempty\"`\n\tCompile             *bool             `protobuf:\"varint,25,opt,name=compile,def=0\" json:\"compile,omitempty\"`\n\tFailoverMs          *int64            `protobuf:\"varint,26,opt,name=failover_ms\" json:\"failover_ms,omitempty\"`\n\tStrong              *bool             `protobuf:\"varint,32,opt,name=strong\" json:\"strong,omitempty\"`\n\tPropertyName        []string          `protobuf:\"bytes,33,rep,name=property_name\" json:\"property_name,omitempty\"`\n\tGroupByPropertyName []string          `protobuf:\"bytes,34,rep,name=group_by_property_name\" 
json:\"group_by_property_name,omitempty\"`\n\tDistinct            *bool             `protobuf:\"varint,24,opt,name=distinct\" json:\"distinct,omitempty\"`\n\tMinSafeTimeSeconds  *int64            `protobuf:\"varint,35,opt,name=min_safe_time_seconds\" json:\"min_safe_time_seconds,omitempty\"`\n\tSafeReplicaName     []string          `protobuf:\"bytes,36,rep,name=safe_replica_name\" json:\"safe_replica_name,omitempty\"`\n\tPersistOffset       *bool             `protobuf:\"varint,37,opt,name=persist_offset,def=0\" json:\"persist_offset,omitempty\"`\n\tXXX_unrecognized    []byte            `json:\"-\"`\n}\n\nfunc (m *Query) Reset()         { *m = Query{} }\nfunc (m *Query) String() string { return proto.CompactTextString(m) }\nfunc (*Query) ProtoMessage()    {}\n\nconst Default_Query_Offset int32 = 0\nconst Default_Query_RequirePerfectPlan bool = false\nconst Default_Query_KeysOnly bool = false\nconst Default_Query_Compile bool = false\nconst Default_Query_PersistOffset bool = false\n\nfunc (m *Query) GetHeader() *InternalHeader {\n\tif m != nil {\n\t\treturn m.Header\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetApp() string {\n\tif m != nil && m.App != nil {\n\t\treturn *m.App\n\t}\n\treturn \"\"\n}\n\nfunc (m *Query) GetNameSpace() string {\n\tif m != nil && m.NameSpace != nil {\n\t\treturn *m.NameSpace\n\t}\n\treturn \"\"\n}\n\nfunc (m *Query) GetKind() string {\n\tif m != nil && m.Kind != nil {\n\t\treturn *m.Kind\n\t}\n\treturn \"\"\n}\n\nfunc (m *Query) GetAncestor() *Reference {\n\tif m != nil {\n\t\treturn m.Ancestor\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetFilter() []*Query_Filter {\n\tif m != nil {\n\t\treturn m.Filter\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetSearchQuery() string {\n\tif m != nil && m.SearchQuery != nil {\n\t\treturn *m.SearchQuery\n\t}\n\treturn \"\"\n}\n\nfunc (m *Query) GetOrder() []*Query_Order {\n\tif m != nil {\n\t\treturn m.Order\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetHint() Query_Hint {\n\tif m != nil && m.Hint != nil 
{\n\t\treturn *m.Hint\n\t}\n\treturn Query_ORDER_FIRST\n}\n\nfunc (m *Query) GetCount() int32 {\n\tif m != nil && m.Count != nil {\n\t\treturn *m.Count\n\t}\n\treturn 0\n}\n\nfunc (m *Query) GetOffset() int32 {\n\tif m != nil && m.Offset != nil {\n\t\treturn *m.Offset\n\t}\n\treturn Default_Query_Offset\n}\n\nfunc (m *Query) GetLimit() int32 {\n\tif m != nil && m.Limit != nil {\n\t\treturn *m.Limit\n\t}\n\treturn 0\n}\n\nfunc (m *Query) GetCompiledCursor() *CompiledCursor {\n\tif m != nil {\n\t\treturn m.CompiledCursor\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetEndCompiledCursor() *CompiledCursor {\n\tif m != nil {\n\t\treturn m.EndCompiledCursor\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetCompositeIndex() []*CompositeIndex {\n\tif m != nil {\n\t\treturn m.CompositeIndex\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetRequirePerfectPlan() bool {\n\tif m != nil && m.RequirePerfectPlan != nil {\n\t\treturn *m.RequirePerfectPlan\n\t}\n\treturn Default_Query_RequirePerfectPlan\n}\n\nfunc (m *Query) GetKeysOnly() bool {\n\tif m != nil && m.KeysOnly != nil {\n\t\treturn *m.KeysOnly\n\t}\n\treturn Default_Query_KeysOnly\n}\n\nfunc (m *Query) GetTransaction() *Transaction {\n\tif m != nil {\n\t\treturn m.Transaction\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetCompile() bool {\n\tif m != nil && m.Compile != nil {\n\t\treturn *m.Compile\n\t}\n\treturn Default_Query_Compile\n}\n\nfunc (m *Query) GetFailoverMs() int64 {\n\tif m != nil && m.FailoverMs != nil {\n\t\treturn *m.FailoverMs\n\t}\n\treturn 0\n}\n\nfunc (m *Query) GetStrong() bool {\n\tif m != nil && m.Strong != nil {\n\t\treturn *m.Strong\n\t}\n\treturn false\n}\n\nfunc (m *Query) GetPropertyName() []string {\n\tif m != nil {\n\t\treturn m.PropertyName\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetGroupByPropertyName() []string {\n\tif m != nil {\n\t\treturn m.GroupByPropertyName\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetDistinct() bool {\n\tif m != nil && m.Distinct != nil {\n\t\treturn *m.Distinct\n\t}\n\treturn 
false\n}\n\nfunc (m *Query) GetMinSafeTimeSeconds() int64 {\n\tif m != nil && m.MinSafeTimeSeconds != nil {\n\t\treturn *m.MinSafeTimeSeconds\n\t}\n\treturn 0\n}\n\nfunc (m *Query) GetSafeReplicaName() []string {\n\tif m != nil {\n\t\treturn m.SafeReplicaName\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetPersistOffset() bool {\n\tif m != nil && m.PersistOffset != nil {\n\t\treturn *m.PersistOffset\n\t}\n\treturn Default_Query_PersistOffset\n}\n\ntype Query_Filter struct {\n\tOp               *Query_Filter_Operator `protobuf:\"varint,6,req,name=op,enum=appengine.Query_Filter_Operator\" json:\"op,omitempty\"`\n\tProperty         []*Property            `protobuf:\"bytes,14,rep,name=property\" json:\"property,omitempty\"`\n\tXXX_unrecognized []byte                 `json:\"-\"`\n}\n\nfunc (m *Query_Filter) Reset()         { *m = Query_Filter{} }\nfunc (m *Query_Filter) String() string { return proto.CompactTextString(m) }\nfunc (*Query_Filter) ProtoMessage()    {}\n\nfunc (m *Query_Filter) GetOp() Query_Filter_Operator {\n\tif m != nil && m.Op != nil {\n\t\treturn *m.Op\n\t}\n\treturn Query_Filter_LESS_THAN\n}\n\nfunc (m *Query_Filter) GetProperty() []*Property {\n\tif m != nil {\n\t\treturn m.Property\n\t}\n\treturn nil\n}\n\ntype Query_Order struct {\n\tProperty         *string                `protobuf:\"bytes,10,req,name=property\" json:\"property,omitempty\"`\n\tDirection        *Query_Order_Direction `protobuf:\"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1\" json:\"direction,omitempty\"`\n\tXXX_unrecognized []byte                 `json:\"-\"`\n}\n\nfunc (m *Query_Order) Reset()         { *m = Query_Order{} }\nfunc (m *Query_Order) String() string { return proto.CompactTextString(m) }\nfunc (*Query_Order) ProtoMessage()    {}\n\nconst Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING\n\nfunc (m *Query_Order) GetProperty() string {\n\tif m != nil && m.Property != nil {\n\t\treturn *m.Property\n\t}\n\treturn 
\"\"\n}\n\nfunc (m *Query_Order) GetDirection() Query_Order_Direction {\n\tif m != nil && m.Direction != nil {\n\t\treturn *m.Direction\n\t}\n\treturn Default_Query_Order_Direction\n}\n\ntype CompiledQuery struct {\n\tPrimaryscan       *CompiledQuery_PrimaryScan     `protobuf:\"group,1,req,name=PrimaryScan\" json:\"primaryscan,omitempty\"`\n\tMergejoinscan     []*CompiledQuery_MergeJoinScan `protobuf:\"group,7,rep,name=MergeJoinScan\" json:\"mergejoinscan,omitempty\"`\n\tIndexDef          *Index                         `protobuf:\"bytes,21,opt,name=index_def\" json:\"index_def,omitempty\"`\n\tOffset            *int32                         `protobuf:\"varint,10,opt,name=offset,def=0\" json:\"offset,omitempty\"`\n\tLimit             *int32                         `protobuf:\"varint,11,opt,name=limit\" json:\"limit,omitempty\"`\n\tKeysOnly          *bool                          `protobuf:\"varint,12,req,name=keys_only\" json:\"keys_only,omitempty\"`\n\tPropertyName      []string                       `protobuf:\"bytes,24,rep,name=property_name\" json:\"property_name,omitempty\"`\n\tDistinctInfixSize *int32                         `protobuf:\"varint,25,opt,name=distinct_infix_size\" json:\"distinct_infix_size,omitempty\"`\n\tEntityfilter      *CompiledQuery_EntityFilter    `protobuf:\"group,13,opt,name=EntityFilter\" json:\"entityfilter,omitempty\"`\n\tXXX_unrecognized  []byte                         `json:\"-\"`\n}\n\nfunc (m *CompiledQuery) Reset()         { *m = CompiledQuery{} }\nfunc (m *CompiledQuery) String() string { return proto.CompactTextString(m) }\nfunc (*CompiledQuery) ProtoMessage()    {}\n\nconst Default_CompiledQuery_Offset int32 = 0\n\nfunc (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {\n\tif m != nil {\n\t\treturn m.Primaryscan\n\t}\n\treturn nil\n}\n\nfunc (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {\n\tif m != nil {\n\t\treturn m.Mergejoinscan\n\t}\n\treturn nil\n}\n\nfunc (m *CompiledQuery) 
GetIndexDef() *Index {\n\tif m != nil {\n\t\treturn m.IndexDef\n\t}\n\treturn nil\n}\n\nfunc (m *CompiledQuery) GetOffset() int32 {\n\tif m != nil && m.Offset != nil {\n\t\treturn *m.Offset\n\t}\n\treturn Default_CompiledQuery_Offset\n}\n\nfunc (m *CompiledQuery) GetLimit() int32 {\n\tif m != nil && m.Limit != nil {\n\t\treturn *m.Limit\n\t}\n\treturn 0\n}\n\nfunc (m *CompiledQuery) GetKeysOnly() bool {\n\tif m != nil && m.KeysOnly != nil {\n\t\treturn *m.KeysOnly\n\t}\n\treturn false\n}\n\nfunc (m *CompiledQuery) GetPropertyName() []string {\n\tif m != nil {\n\t\treturn m.PropertyName\n\t}\n\treturn nil\n}\n\nfunc (m *CompiledQuery) GetDistinctInfixSize() int32 {\n\tif m != nil && m.DistinctInfixSize != nil {\n\t\treturn *m.DistinctInfixSize\n\t}\n\treturn 0\n}\n\nfunc (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {\n\tif m != nil {\n\t\treturn m.Entityfilter\n\t}\n\treturn nil\n}\n\ntype CompiledQuery_PrimaryScan struct {\n\tIndexName                  *string  `protobuf:\"bytes,2,opt,name=index_name\" json:\"index_name,omitempty\"`\n\tStartKey                   *string  `protobuf:\"bytes,3,opt,name=start_key\" json:\"start_key,omitempty\"`\n\tStartInclusive             *bool    `protobuf:\"varint,4,opt,name=start_inclusive\" json:\"start_inclusive,omitempty\"`\n\tEndKey                     *string  `protobuf:\"bytes,5,opt,name=end_key\" json:\"end_key,omitempty\"`\n\tEndInclusive               *bool    `protobuf:\"varint,6,opt,name=end_inclusive\" json:\"end_inclusive,omitempty\"`\n\tStartPostfixValue          []string `protobuf:\"bytes,22,rep,name=start_postfix_value\" json:\"start_postfix_value,omitempty\"`\n\tEndPostfixValue            []string `protobuf:\"bytes,23,rep,name=end_postfix_value\" json:\"end_postfix_value,omitempty\"`\n\tEndUnappliedLogTimestampUs *int64   `protobuf:\"varint,19,opt,name=end_unapplied_log_timestamp_us\" json:\"end_unapplied_log_timestamp_us,omitempty\"`\n\tXXX_unrecognized           []byte   
`json:\"-\"`\n}\n\nfunc (m *CompiledQuery_PrimaryScan) Reset()         { *m = CompiledQuery_PrimaryScan{} }\nfunc (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }\nfunc (*CompiledQuery_PrimaryScan) ProtoMessage()    {}\n\nfunc (m *CompiledQuery_PrimaryScan) GetIndexName() string {\n\tif m != nil && m.IndexName != nil {\n\t\treturn *m.IndexName\n\t}\n\treturn \"\"\n}\n\nfunc (m *CompiledQuery_PrimaryScan) GetStartKey() string {\n\tif m != nil && m.StartKey != nil {\n\t\treturn *m.StartKey\n\t}\n\treturn \"\"\n}\n\nfunc (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {\n\tif m != nil && m.StartInclusive != nil {\n\t\treturn *m.StartInclusive\n\t}\n\treturn false\n}\n\nfunc (m *CompiledQuery_PrimaryScan) GetEndKey() string {\n\tif m != nil && m.EndKey != nil {\n\t\treturn *m.EndKey\n\t}\n\treturn \"\"\n}\n\nfunc (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {\n\tif m != nil && m.EndInclusive != nil {\n\t\treturn *m.EndInclusive\n\t}\n\treturn false\n}\n\nfunc (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {\n\tif m != nil {\n\t\treturn m.StartPostfixValue\n\t}\n\treturn nil\n}\n\nfunc (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {\n\tif m != nil {\n\t\treturn m.EndPostfixValue\n\t}\n\treturn nil\n}\n\nfunc (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {\n\tif m != nil && m.EndUnappliedLogTimestampUs != nil {\n\t\treturn *m.EndUnappliedLogTimestampUs\n\t}\n\treturn 0\n}\n\ntype CompiledQuery_MergeJoinScan struct {\n\tIndexName        *string  `protobuf:\"bytes,8,req,name=index_name\" json:\"index_name,omitempty\"`\n\tPrefixValue      []string `protobuf:\"bytes,9,rep,name=prefix_value\" json:\"prefix_value,omitempty\"`\n\tValuePrefix      *bool    `protobuf:\"varint,20,opt,name=value_prefix,def=0\" json:\"value_prefix,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *CompiledQuery_MergeJoinScan) Reset()         { *m = 
CompiledQuery_MergeJoinScan{} }\nfunc (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }\nfunc (*CompiledQuery_MergeJoinScan) ProtoMessage()    {}\n\nconst Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false\n\nfunc (m *CompiledQuery_MergeJoinScan) GetIndexName() string {\n\tif m != nil && m.IndexName != nil {\n\t\treturn *m.IndexName\n\t}\n\treturn \"\"\n}\n\nfunc (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {\n\tif m != nil {\n\t\treturn m.PrefixValue\n\t}\n\treturn nil\n}\n\nfunc (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {\n\tif m != nil && m.ValuePrefix != nil {\n\t\treturn *m.ValuePrefix\n\t}\n\treturn Default_CompiledQuery_MergeJoinScan_ValuePrefix\n}\n\ntype CompiledQuery_EntityFilter struct {\n\tDistinct         *bool      `protobuf:\"varint,14,opt,name=distinct,def=0\" json:\"distinct,omitempty\"`\n\tKind             *string    `protobuf:\"bytes,17,opt,name=kind\" json:\"kind,omitempty\"`\n\tAncestor         *Reference `protobuf:\"bytes,18,opt,name=ancestor\" json:\"ancestor,omitempty\"`\n\tXXX_unrecognized []byte     `json:\"-\"`\n}\n\nfunc (m *CompiledQuery_EntityFilter) Reset()         { *m = CompiledQuery_EntityFilter{} }\nfunc (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }\nfunc (*CompiledQuery_EntityFilter) ProtoMessage()    {}\n\nconst Default_CompiledQuery_EntityFilter_Distinct bool = false\n\nfunc (m *CompiledQuery_EntityFilter) GetDistinct() bool {\n\tif m != nil && m.Distinct != nil {\n\t\treturn *m.Distinct\n\t}\n\treturn Default_CompiledQuery_EntityFilter_Distinct\n}\n\nfunc (m *CompiledQuery_EntityFilter) GetKind() string {\n\tif m != nil && m.Kind != nil {\n\t\treturn *m.Kind\n\t}\n\treturn \"\"\n}\n\nfunc (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {\n\tif m != nil {\n\t\treturn m.Ancestor\n\t}\n\treturn nil\n}\n\ntype CompiledCursor struct {\n\tPosition         *CompiledCursor_Position 
`protobuf:\"group,2,opt,name=Position\" json:\"position,omitempty\"`\n\tXXX_unrecognized []byte                   `json:\"-\"`\n}\n\nfunc (m *CompiledCursor) Reset()         { *m = CompiledCursor{} }\nfunc (m *CompiledCursor) String() string { return proto.CompactTextString(m) }\nfunc (*CompiledCursor) ProtoMessage()    {}\n\nfunc (m *CompiledCursor) GetPosition() *CompiledCursor_Position {\n\tif m != nil {\n\t\treturn m.Position\n\t}\n\treturn nil\n}\n\ntype CompiledCursor_Position struct {\n\tStartKey         *string                               `protobuf:\"bytes,27,opt,name=start_key\" json:\"start_key,omitempty\"`\n\tIndexvalue       []*CompiledCursor_Position_IndexValue `protobuf:\"group,29,rep,name=IndexValue\" json:\"indexvalue,omitempty\"`\n\tKey              *Reference                            `protobuf:\"bytes,32,opt,name=key\" json:\"key,omitempty\"`\n\tStartInclusive   *bool                                 `protobuf:\"varint,28,opt,name=start_inclusive,def=1\" json:\"start_inclusive,omitempty\"`\n\tXXX_unrecognized []byte                                `json:\"-\"`\n}\n\nfunc (m *CompiledCursor_Position) Reset()         { *m = CompiledCursor_Position{} }\nfunc (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }\nfunc (*CompiledCursor_Position) ProtoMessage()    {}\n\nconst Default_CompiledCursor_Position_StartInclusive bool = true\n\nfunc (m *CompiledCursor_Position) GetStartKey() string {\n\tif m != nil && m.StartKey != nil {\n\t\treturn *m.StartKey\n\t}\n\treturn \"\"\n}\n\nfunc (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {\n\tif m != nil {\n\t\treturn m.Indexvalue\n\t}\n\treturn nil\n}\n\nfunc (m *CompiledCursor_Position) GetKey() *Reference {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *CompiledCursor_Position) GetStartInclusive() bool {\n\tif m != nil && m.StartInclusive != nil {\n\t\treturn *m.StartInclusive\n\t}\n\treturn 
Default_CompiledCursor_Position_StartInclusive\n}\n\ntype CompiledCursor_Position_IndexValue struct {\n\tProperty         *string        `protobuf:\"bytes,30,opt,name=property\" json:\"property,omitempty\"`\n\tValue            *PropertyValue `protobuf:\"bytes,31,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte         `json:\"-\"`\n}\n\nfunc (m *CompiledCursor_Position_IndexValue) Reset()         { *m = CompiledCursor_Position_IndexValue{} }\nfunc (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }\nfunc (*CompiledCursor_Position_IndexValue) ProtoMessage()    {}\n\nfunc (m *CompiledCursor_Position_IndexValue) GetProperty() string {\n\tif m != nil && m.Property != nil {\n\t\treturn *m.Property\n\t}\n\treturn \"\"\n}\n\nfunc (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\ntype Cursor struct {\n\tCursor           *uint64 `protobuf:\"fixed64,1,req,name=cursor\" json:\"cursor,omitempty\"`\n\tApp              *string `protobuf:\"bytes,2,opt,name=app\" json:\"app,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *Cursor) Reset()         { *m = Cursor{} }\nfunc (m *Cursor) String() string { return proto.CompactTextString(m) }\nfunc (*Cursor) ProtoMessage()    {}\n\nfunc (m *Cursor) GetCursor() uint64 {\n\tif m != nil && m.Cursor != nil {\n\t\treturn *m.Cursor\n\t}\n\treturn 0\n}\n\nfunc (m *Cursor) GetApp() string {\n\tif m != nil && m.App != nil {\n\t\treturn *m.App\n\t}\n\treturn \"\"\n}\n\ntype Error struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Error) Reset()         { *m = Error{} }\nfunc (m *Error) String() string { return proto.CompactTextString(m) }\nfunc (*Error) ProtoMessage()    {}\n\ntype Cost struct {\n\tIndexWrites             *int32           `protobuf:\"varint,1,opt,name=index_writes\" json:\"index_writes,omitempty\"`\n\tIndexWriteBytes         *int32           
`protobuf:\"varint,2,opt,name=index_write_bytes\" json:\"index_write_bytes,omitempty\"`\n\tEntityWrites            *int32           `protobuf:\"varint,3,opt,name=entity_writes\" json:\"entity_writes,omitempty\"`\n\tEntityWriteBytes        *int32           `protobuf:\"varint,4,opt,name=entity_write_bytes\" json:\"entity_write_bytes,omitempty\"`\n\tCommitcost              *Cost_CommitCost `protobuf:\"group,5,opt,name=CommitCost\" json:\"commitcost,omitempty\"`\n\tApproximateStorageDelta *int32           `protobuf:\"varint,8,opt,name=approximate_storage_delta\" json:\"approximate_storage_delta,omitempty\"`\n\tIdSequenceUpdates       *int32           `protobuf:\"varint,9,opt,name=id_sequence_updates\" json:\"id_sequence_updates,omitempty\"`\n\tXXX_unrecognized        []byte           `json:\"-\"`\n}\n\nfunc (m *Cost) Reset()         { *m = Cost{} }\nfunc (m *Cost) String() string { return proto.CompactTextString(m) }\nfunc (*Cost) ProtoMessage()    {}\n\nfunc (m *Cost) GetIndexWrites() int32 {\n\tif m != nil && m.IndexWrites != nil {\n\t\treturn *m.IndexWrites\n\t}\n\treturn 0\n}\n\nfunc (m *Cost) GetIndexWriteBytes() int32 {\n\tif m != nil && m.IndexWriteBytes != nil {\n\t\treturn *m.IndexWriteBytes\n\t}\n\treturn 0\n}\n\nfunc (m *Cost) GetEntityWrites() int32 {\n\tif m != nil && m.EntityWrites != nil {\n\t\treturn *m.EntityWrites\n\t}\n\treturn 0\n}\n\nfunc (m *Cost) GetEntityWriteBytes() int32 {\n\tif m != nil && m.EntityWriteBytes != nil {\n\t\treturn *m.EntityWriteBytes\n\t}\n\treturn 0\n}\n\nfunc (m *Cost) GetCommitcost() *Cost_CommitCost {\n\tif m != nil {\n\t\treturn m.Commitcost\n\t}\n\treturn nil\n}\n\nfunc (m *Cost) GetApproximateStorageDelta() int32 {\n\tif m != nil && m.ApproximateStorageDelta != nil {\n\t\treturn *m.ApproximateStorageDelta\n\t}\n\treturn 0\n}\n\nfunc (m *Cost) GetIdSequenceUpdates() int32 {\n\tif m != nil && m.IdSequenceUpdates != nil {\n\t\treturn *m.IdSequenceUpdates\n\t}\n\treturn 0\n}\n\ntype Cost_CommitCost struct 
{\n\tRequestedEntityPuts    *int32 `protobuf:\"varint,6,opt,name=requested_entity_puts\" json:\"requested_entity_puts,omitempty\"`\n\tRequestedEntityDeletes *int32 `protobuf:\"varint,7,opt,name=requested_entity_deletes\" json:\"requested_entity_deletes,omitempty\"`\n\tXXX_unrecognized       []byte `json:\"-\"`\n}\n\nfunc (m *Cost_CommitCost) Reset()         { *m = Cost_CommitCost{} }\nfunc (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }\nfunc (*Cost_CommitCost) ProtoMessage()    {}\n\nfunc (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {\n\tif m != nil && m.RequestedEntityPuts != nil {\n\t\treturn *m.RequestedEntityPuts\n\t}\n\treturn 0\n}\n\nfunc (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {\n\tif m != nil && m.RequestedEntityDeletes != nil {\n\t\treturn *m.RequestedEntityDeletes\n\t}\n\treturn 0\n}\n\ntype GetRequest struct {\n\tHeader           *InternalHeader `protobuf:\"bytes,6,opt,name=header\" json:\"header,omitempty\"`\n\tKey              []*Reference    `protobuf:\"bytes,1,rep,name=key\" json:\"key,omitempty\"`\n\tTransaction      *Transaction    `protobuf:\"bytes,2,opt,name=transaction\" json:\"transaction,omitempty\"`\n\tFailoverMs       *int64          `protobuf:\"varint,3,opt,name=failover_ms\" json:\"failover_ms,omitempty\"`\n\tStrong           *bool           `protobuf:\"varint,4,opt,name=strong\" json:\"strong,omitempty\"`\n\tAllowDeferred    *bool           `protobuf:\"varint,5,opt,name=allow_deferred,def=0\" json:\"allow_deferred,omitempty\"`\n\tXXX_unrecognized []byte          `json:\"-\"`\n}\n\nfunc (m *GetRequest) Reset()         { *m = GetRequest{} }\nfunc (m *GetRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetRequest) ProtoMessage()    {}\n\nconst Default_GetRequest_AllowDeferred bool = false\n\nfunc (m *GetRequest) GetHeader() *InternalHeader {\n\tif m != nil {\n\t\treturn m.Header\n\t}\n\treturn nil\n}\n\nfunc (m *GetRequest) GetKey() []*Reference {\n\tif m != nil 
{\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *GetRequest) GetTransaction() *Transaction {\n\tif m != nil {\n\t\treturn m.Transaction\n\t}\n\treturn nil\n}\n\nfunc (m *GetRequest) GetFailoverMs() int64 {\n\tif m != nil && m.FailoverMs != nil {\n\t\treturn *m.FailoverMs\n\t}\n\treturn 0\n}\n\nfunc (m *GetRequest) GetStrong() bool {\n\tif m != nil && m.Strong != nil {\n\t\treturn *m.Strong\n\t}\n\treturn false\n}\n\nfunc (m *GetRequest) GetAllowDeferred() bool {\n\tif m != nil && m.AllowDeferred != nil {\n\t\treturn *m.AllowDeferred\n\t}\n\treturn Default_GetRequest_AllowDeferred\n}\n\ntype GetResponse struct {\n\tEntity           []*GetResponse_Entity `protobuf:\"group,1,rep,name=Entity\" json:\"entity,omitempty\"`\n\tDeferred         []*Reference          `protobuf:\"bytes,5,rep,name=deferred\" json:\"deferred,omitempty\"`\n\tInOrder          *bool                 `protobuf:\"varint,6,opt,name=in_order,def=1\" json:\"in_order,omitempty\"`\n\tXXX_unrecognized []byte                `json:\"-\"`\n}\n\nfunc (m *GetResponse) Reset()         { *m = GetResponse{} }\nfunc (m *GetResponse) String() string { return proto.CompactTextString(m) }\nfunc (*GetResponse) ProtoMessage()    {}\n\nconst Default_GetResponse_InOrder bool = true\n\nfunc (m *GetResponse) GetEntity() []*GetResponse_Entity {\n\tif m != nil {\n\t\treturn m.Entity\n\t}\n\treturn nil\n}\n\nfunc (m *GetResponse) GetDeferred() []*Reference {\n\tif m != nil {\n\t\treturn m.Deferred\n\t}\n\treturn nil\n}\n\nfunc (m *GetResponse) GetInOrder() bool {\n\tif m != nil && m.InOrder != nil {\n\t\treturn *m.InOrder\n\t}\n\treturn Default_GetResponse_InOrder\n}\n\ntype GetResponse_Entity struct {\n\tEntity           *EntityProto `protobuf:\"bytes,2,opt,name=entity\" json:\"entity,omitempty\"`\n\tKey              *Reference   `protobuf:\"bytes,4,opt,name=key\" json:\"key,omitempty\"`\n\tVersion          *int64       `protobuf:\"varint,3,opt,name=version\" json:\"version,omitempty\"`\n\tXXX_unrecognized []byte       
`json:\"-\"`\n}\n\nfunc (m *GetResponse_Entity) Reset()         { *m = GetResponse_Entity{} }\nfunc (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }\nfunc (*GetResponse_Entity) ProtoMessage()    {}\n\nfunc (m *GetResponse_Entity) GetEntity() *EntityProto {\n\tif m != nil {\n\t\treturn m.Entity\n\t}\n\treturn nil\n}\n\nfunc (m *GetResponse_Entity) GetKey() *Reference {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *GetResponse_Entity) GetVersion() int64 {\n\tif m != nil && m.Version != nil {\n\t\treturn *m.Version\n\t}\n\treturn 0\n}\n\ntype PutRequest struct {\n\tHeader           *InternalHeader          `protobuf:\"bytes,11,opt,name=header\" json:\"header,omitempty\"`\n\tEntity           []*EntityProto           `protobuf:\"bytes,1,rep,name=entity\" json:\"entity,omitempty\"`\n\tTransaction      *Transaction             `protobuf:\"bytes,2,opt,name=transaction\" json:\"transaction,omitempty\"`\n\tCompositeIndex   []*CompositeIndex        `protobuf:\"bytes,3,rep,name=composite_index\" json:\"composite_index,omitempty\"`\n\tTrusted          *bool                    `protobuf:\"varint,4,opt,name=trusted,def=0\" json:\"trusted,omitempty\"`\n\tForce            *bool                    `protobuf:\"varint,7,opt,name=force,def=0\" json:\"force,omitempty\"`\n\tMarkChanges      *bool                    `protobuf:\"varint,8,opt,name=mark_changes,def=0\" json:\"mark_changes,omitempty\"`\n\tSnapshot         []*Snapshot              `protobuf:\"bytes,9,rep,name=snapshot\" json:\"snapshot,omitempty\"`\n\tAutoIdPolicy     *PutRequest_AutoIdPolicy `protobuf:\"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0\" json:\"auto_id_policy,omitempty\"`\n\tXXX_unrecognized []byte                   `json:\"-\"`\n}\n\nfunc (m *PutRequest) Reset()         { *m = PutRequest{} }\nfunc (m *PutRequest) String() string { return proto.CompactTextString(m) }\nfunc (*PutRequest) ProtoMessage()    {}\n\nconst 
Default_PutRequest_Trusted bool = false\nconst Default_PutRequest_Force bool = false\nconst Default_PutRequest_MarkChanges bool = false\nconst Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT\n\nfunc (m *PutRequest) GetHeader() *InternalHeader {\n\tif m != nil {\n\t\treturn m.Header\n\t}\n\treturn nil\n}\n\nfunc (m *PutRequest) GetEntity() []*EntityProto {\n\tif m != nil {\n\t\treturn m.Entity\n\t}\n\treturn nil\n}\n\nfunc (m *PutRequest) GetTransaction() *Transaction {\n\tif m != nil {\n\t\treturn m.Transaction\n\t}\n\treturn nil\n}\n\nfunc (m *PutRequest) GetCompositeIndex() []*CompositeIndex {\n\tif m != nil {\n\t\treturn m.CompositeIndex\n\t}\n\treturn nil\n}\n\nfunc (m *PutRequest) GetTrusted() bool {\n\tif m != nil && m.Trusted != nil {\n\t\treturn *m.Trusted\n\t}\n\treturn Default_PutRequest_Trusted\n}\n\nfunc (m *PutRequest) GetForce() bool {\n\tif m != nil && m.Force != nil {\n\t\treturn *m.Force\n\t}\n\treturn Default_PutRequest_Force\n}\n\nfunc (m *PutRequest) GetMarkChanges() bool {\n\tif m != nil && m.MarkChanges != nil {\n\t\treturn *m.MarkChanges\n\t}\n\treturn Default_PutRequest_MarkChanges\n}\n\nfunc (m *PutRequest) GetSnapshot() []*Snapshot {\n\tif m != nil {\n\t\treturn m.Snapshot\n\t}\n\treturn nil\n}\n\nfunc (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {\n\tif m != nil && m.AutoIdPolicy != nil {\n\t\treturn *m.AutoIdPolicy\n\t}\n\treturn Default_PutRequest_AutoIdPolicy\n}\n\ntype PutResponse struct {\n\tKey              []*Reference `protobuf:\"bytes,1,rep,name=key\" json:\"key,omitempty\"`\n\tCost             *Cost        `protobuf:\"bytes,2,opt,name=cost\" json:\"cost,omitempty\"`\n\tVersion          []int64      `protobuf:\"varint,3,rep,name=version\" json:\"version,omitempty\"`\n\tXXX_unrecognized []byte       `json:\"-\"`\n}\n\nfunc (m *PutResponse) Reset()         { *m = PutResponse{} }\nfunc (m *PutResponse) String() string { return proto.CompactTextString(m) }\nfunc (*PutResponse) ProtoMessage()  
  {}\n\nfunc (m *PutResponse) GetKey() []*Reference {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *PutResponse) GetCost() *Cost {\n\tif m != nil {\n\t\treturn m.Cost\n\t}\n\treturn nil\n}\n\nfunc (m *PutResponse) GetVersion() []int64 {\n\tif m != nil {\n\t\treturn m.Version\n\t}\n\treturn nil\n}\n\ntype TouchRequest struct {\n\tHeader           *InternalHeader   `protobuf:\"bytes,10,opt,name=header\" json:\"header,omitempty\"`\n\tKey              []*Reference      `protobuf:\"bytes,1,rep,name=key\" json:\"key,omitempty\"`\n\tCompositeIndex   []*CompositeIndex `protobuf:\"bytes,2,rep,name=composite_index\" json:\"composite_index,omitempty\"`\n\tForce            *bool             `protobuf:\"varint,3,opt,name=force,def=0\" json:\"force,omitempty\"`\n\tSnapshot         []*Snapshot       `protobuf:\"bytes,9,rep,name=snapshot\" json:\"snapshot,omitempty\"`\n\tXXX_unrecognized []byte            `json:\"-\"`\n}\n\nfunc (m *TouchRequest) Reset()         { *m = TouchRequest{} }\nfunc (m *TouchRequest) String() string { return proto.CompactTextString(m) }\nfunc (*TouchRequest) ProtoMessage()    {}\n\nconst Default_TouchRequest_Force bool = false\n\nfunc (m *TouchRequest) GetHeader() *InternalHeader {\n\tif m != nil {\n\t\treturn m.Header\n\t}\n\treturn nil\n}\n\nfunc (m *TouchRequest) GetKey() []*Reference {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {\n\tif m != nil {\n\t\treturn m.CompositeIndex\n\t}\n\treturn nil\n}\n\nfunc (m *TouchRequest) GetForce() bool {\n\tif m != nil && m.Force != nil {\n\t\treturn *m.Force\n\t}\n\treturn Default_TouchRequest_Force\n}\n\nfunc (m *TouchRequest) GetSnapshot() []*Snapshot {\n\tif m != nil {\n\t\treturn m.Snapshot\n\t}\n\treturn nil\n}\n\ntype TouchResponse struct {\n\tCost             *Cost  `protobuf:\"bytes,1,opt,name=cost\" json:\"cost,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *TouchResponse) Reset()        
 { *m = TouchResponse{} }\nfunc (m *TouchResponse) String() string { return proto.CompactTextString(m) }\nfunc (*TouchResponse) ProtoMessage()    {}\n\nfunc (m *TouchResponse) GetCost() *Cost {\n\tif m != nil {\n\t\treturn m.Cost\n\t}\n\treturn nil\n}\n\ntype DeleteRequest struct {\n\tHeader           *InternalHeader `protobuf:\"bytes,10,opt,name=header\" json:\"header,omitempty\"`\n\tKey              []*Reference    `protobuf:\"bytes,6,rep,name=key\" json:\"key,omitempty\"`\n\tTransaction      *Transaction    `protobuf:\"bytes,5,opt,name=transaction\" json:\"transaction,omitempty\"`\n\tTrusted          *bool           `protobuf:\"varint,4,opt,name=trusted,def=0\" json:\"trusted,omitempty\"`\n\tForce            *bool           `protobuf:\"varint,7,opt,name=force,def=0\" json:\"force,omitempty\"`\n\tMarkChanges      *bool           `protobuf:\"varint,8,opt,name=mark_changes,def=0\" json:\"mark_changes,omitempty\"`\n\tSnapshot         []*Snapshot     `protobuf:\"bytes,9,rep,name=snapshot\" json:\"snapshot,omitempty\"`\n\tXXX_unrecognized []byte          `json:\"-\"`\n}\n\nfunc (m *DeleteRequest) Reset()         { *m = DeleteRequest{} }\nfunc (m *DeleteRequest) String() string { return proto.CompactTextString(m) }\nfunc (*DeleteRequest) ProtoMessage()    {}\n\nconst Default_DeleteRequest_Trusted bool = false\nconst Default_DeleteRequest_Force bool = false\nconst Default_DeleteRequest_MarkChanges bool = false\n\nfunc (m *DeleteRequest) GetHeader() *InternalHeader {\n\tif m != nil {\n\t\treturn m.Header\n\t}\n\treturn nil\n}\n\nfunc (m *DeleteRequest) GetKey() []*Reference {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *DeleteRequest) GetTransaction() *Transaction {\n\tif m != nil {\n\t\treturn m.Transaction\n\t}\n\treturn nil\n}\n\nfunc (m *DeleteRequest) GetTrusted() bool {\n\tif m != nil && m.Trusted != nil {\n\t\treturn *m.Trusted\n\t}\n\treturn Default_DeleteRequest_Trusted\n}\n\nfunc (m *DeleteRequest) GetForce() bool {\n\tif m != nil && 
m.Force != nil {\n\t\treturn *m.Force\n\t}\n\treturn Default_DeleteRequest_Force\n}\n\nfunc (m *DeleteRequest) GetMarkChanges() bool {\n\tif m != nil && m.MarkChanges != nil {\n\t\treturn *m.MarkChanges\n\t}\n\treturn Default_DeleteRequest_MarkChanges\n}\n\nfunc (m *DeleteRequest) GetSnapshot() []*Snapshot {\n\tif m != nil {\n\t\treturn m.Snapshot\n\t}\n\treturn nil\n}\n\ntype DeleteResponse struct {\n\tCost             *Cost   `protobuf:\"bytes,1,opt,name=cost\" json:\"cost,omitempty\"`\n\tVersion          []int64 `protobuf:\"varint,3,rep,name=version\" json:\"version,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *DeleteResponse) Reset()         { *m = DeleteResponse{} }\nfunc (m *DeleteResponse) String() string { return proto.CompactTextString(m) }\nfunc (*DeleteResponse) ProtoMessage()    {}\n\nfunc (m *DeleteResponse) GetCost() *Cost {\n\tif m != nil {\n\t\treturn m.Cost\n\t}\n\treturn nil\n}\n\nfunc (m *DeleteResponse) GetVersion() []int64 {\n\tif m != nil {\n\t\treturn m.Version\n\t}\n\treturn nil\n}\n\ntype NextRequest struct {\n\tHeader           *InternalHeader `protobuf:\"bytes,5,opt,name=header\" json:\"header,omitempty\"`\n\tCursor           *Cursor         `protobuf:\"bytes,1,req,name=cursor\" json:\"cursor,omitempty\"`\n\tCount            *int32          `protobuf:\"varint,2,opt,name=count\" json:\"count,omitempty\"`\n\tOffset           *int32          `protobuf:\"varint,4,opt,name=offset,def=0\" json:\"offset,omitempty\"`\n\tCompile          *bool           `protobuf:\"varint,3,opt,name=compile,def=0\" json:\"compile,omitempty\"`\n\tXXX_unrecognized []byte          `json:\"-\"`\n}\n\nfunc (m *NextRequest) Reset()         { *m = NextRequest{} }\nfunc (m *NextRequest) String() string { return proto.CompactTextString(m) }\nfunc (*NextRequest) ProtoMessage()    {}\n\nconst Default_NextRequest_Offset int32 = 0\nconst Default_NextRequest_Compile bool = false\n\nfunc (m *NextRequest) GetHeader() *InternalHeader {\n\tif m != nil 
{\n\t\treturn m.Header\n\t}\n\treturn nil\n}\n\nfunc (m *NextRequest) GetCursor() *Cursor {\n\tif m != nil {\n\t\treturn m.Cursor\n\t}\n\treturn nil\n}\n\nfunc (m *NextRequest) GetCount() int32 {\n\tif m != nil && m.Count != nil {\n\t\treturn *m.Count\n\t}\n\treturn 0\n}\n\nfunc (m *NextRequest) GetOffset() int32 {\n\tif m != nil && m.Offset != nil {\n\t\treturn *m.Offset\n\t}\n\treturn Default_NextRequest_Offset\n}\n\nfunc (m *NextRequest) GetCompile() bool {\n\tif m != nil && m.Compile != nil {\n\t\treturn *m.Compile\n\t}\n\treturn Default_NextRequest_Compile\n}\n\ntype QueryResult struct {\n\tCursor           *Cursor           `protobuf:\"bytes,1,opt,name=cursor\" json:\"cursor,omitempty\"`\n\tResult           []*EntityProto    `protobuf:\"bytes,2,rep,name=result\" json:\"result,omitempty\"`\n\tSkippedResults   *int32            `protobuf:\"varint,7,opt,name=skipped_results\" json:\"skipped_results,omitempty\"`\n\tMoreResults      *bool             `protobuf:\"varint,3,req,name=more_results\" json:\"more_results,omitempty\"`\n\tKeysOnly         *bool             `protobuf:\"varint,4,opt,name=keys_only\" json:\"keys_only,omitempty\"`\n\tIndexOnly        *bool             `protobuf:\"varint,9,opt,name=index_only\" json:\"index_only,omitempty\"`\n\tSmallOps         *bool             `protobuf:\"varint,10,opt,name=small_ops\" json:\"small_ops,omitempty\"`\n\tCompiledQuery    *CompiledQuery    `protobuf:\"bytes,5,opt,name=compiled_query\" json:\"compiled_query,omitempty\"`\n\tCompiledCursor   *CompiledCursor   `protobuf:\"bytes,6,opt,name=compiled_cursor\" json:\"compiled_cursor,omitempty\"`\n\tIndex            []*CompositeIndex `protobuf:\"bytes,8,rep,name=index\" json:\"index,omitempty\"`\n\tVersion          []int64           `protobuf:\"varint,11,rep,name=version\" json:\"version,omitempty\"`\n\tXXX_unrecognized []byte            `json:\"-\"`\n}\n\nfunc (m *QueryResult) Reset()         { *m = QueryResult{} }\nfunc (m *QueryResult) String() string { return 
proto.CompactTextString(m) }\nfunc (*QueryResult) ProtoMessage()    {}\n\nfunc (m *QueryResult) GetCursor() *Cursor {\n\tif m != nil {\n\t\treturn m.Cursor\n\t}\n\treturn nil\n}\n\nfunc (m *QueryResult) GetResult() []*EntityProto {\n\tif m != nil {\n\t\treturn m.Result\n\t}\n\treturn nil\n}\n\nfunc (m *QueryResult) GetSkippedResults() int32 {\n\tif m != nil && m.SkippedResults != nil {\n\t\treturn *m.SkippedResults\n\t}\n\treturn 0\n}\n\nfunc (m *QueryResult) GetMoreResults() bool {\n\tif m != nil && m.MoreResults != nil {\n\t\treturn *m.MoreResults\n\t}\n\treturn false\n}\n\nfunc (m *QueryResult) GetKeysOnly() bool {\n\tif m != nil && m.KeysOnly != nil {\n\t\treturn *m.KeysOnly\n\t}\n\treturn false\n}\n\nfunc (m *QueryResult) GetIndexOnly() bool {\n\tif m != nil && m.IndexOnly != nil {\n\t\treturn *m.IndexOnly\n\t}\n\treturn false\n}\n\nfunc (m *QueryResult) GetSmallOps() bool {\n\tif m != nil && m.SmallOps != nil {\n\t\treturn *m.SmallOps\n\t}\n\treturn false\n}\n\nfunc (m *QueryResult) GetCompiledQuery() *CompiledQuery {\n\tif m != nil {\n\t\treturn m.CompiledQuery\n\t}\n\treturn nil\n}\n\nfunc (m *QueryResult) GetCompiledCursor() *CompiledCursor {\n\tif m != nil {\n\t\treturn m.CompiledCursor\n\t}\n\treturn nil\n}\n\nfunc (m *QueryResult) GetIndex() []*CompositeIndex {\n\tif m != nil {\n\t\treturn m.Index\n\t}\n\treturn nil\n}\n\nfunc (m *QueryResult) GetVersion() []int64 {\n\tif m != nil {\n\t\treturn m.Version\n\t}\n\treturn nil\n}\n\ntype AllocateIdsRequest struct {\n\tHeader           *InternalHeader `protobuf:\"bytes,4,opt,name=header\" json:\"header,omitempty\"`\n\tModelKey         *Reference      `protobuf:\"bytes,1,opt,name=model_key\" json:\"model_key,omitempty\"`\n\tSize             *int64          `protobuf:\"varint,2,opt,name=size\" json:\"size,omitempty\"`\n\tMax              *int64          `protobuf:\"varint,3,opt,name=max\" json:\"max,omitempty\"`\n\tReserve          []*Reference    `protobuf:\"bytes,5,rep,name=reserve\" 
json:\"reserve,omitempty\"`\n\tXXX_unrecognized []byte          `json:\"-\"`\n}\n\nfunc (m *AllocateIdsRequest) Reset()         { *m = AllocateIdsRequest{} }\nfunc (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }\nfunc (*AllocateIdsRequest) ProtoMessage()    {}\n\nfunc (m *AllocateIdsRequest) GetHeader() *InternalHeader {\n\tif m != nil {\n\t\treturn m.Header\n\t}\n\treturn nil\n}\n\nfunc (m *AllocateIdsRequest) GetModelKey() *Reference {\n\tif m != nil {\n\t\treturn m.ModelKey\n\t}\n\treturn nil\n}\n\nfunc (m *AllocateIdsRequest) GetSize() int64 {\n\tif m != nil && m.Size != nil {\n\t\treturn *m.Size\n\t}\n\treturn 0\n}\n\nfunc (m *AllocateIdsRequest) GetMax() int64 {\n\tif m != nil && m.Max != nil {\n\t\treturn *m.Max\n\t}\n\treturn 0\n}\n\nfunc (m *AllocateIdsRequest) GetReserve() []*Reference {\n\tif m != nil {\n\t\treturn m.Reserve\n\t}\n\treturn nil\n}\n\ntype AllocateIdsResponse struct {\n\tStart            *int64 `protobuf:\"varint,1,req,name=start\" json:\"start,omitempty\"`\n\tEnd              *int64 `protobuf:\"varint,2,req,name=end\" json:\"end,omitempty\"`\n\tCost             *Cost  `protobuf:\"bytes,3,opt,name=cost\" json:\"cost,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *AllocateIdsResponse) Reset()         { *m = AllocateIdsResponse{} }\nfunc (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }\nfunc (*AllocateIdsResponse) ProtoMessage()    {}\n\nfunc (m *AllocateIdsResponse) GetStart() int64 {\n\tif m != nil && m.Start != nil {\n\t\treturn *m.Start\n\t}\n\treturn 0\n}\n\nfunc (m *AllocateIdsResponse) GetEnd() int64 {\n\tif m != nil && m.End != nil {\n\t\treturn *m.End\n\t}\n\treturn 0\n}\n\nfunc (m *AllocateIdsResponse) GetCost() *Cost {\n\tif m != nil {\n\t\treturn m.Cost\n\t}\n\treturn nil\n}\n\ntype CompositeIndices struct {\n\tIndex            []*CompositeIndex `protobuf:\"bytes,1,rep,name=index\" json:\"index,omitempty\"`\n\tXXX_unrecognized []byte            
`json:\"-\"`\n}\n\nfunc (m *CompositeIndices) Reset()         { *m = CompositeIndices{} }\nfunc (m *CompositeIndices) String() string { return proto.CompactTextString(m) }\nfunc (*CompositeIndices) ProtoMessage()    {}\n\nfunc (m *CompositeIndices) GetIndex() []*CompositeIndex {\n\tif m != nil {\n\t\treturn m.Index\n\t}\n\treturn nil\n}\n\ntype AddActionsRequest struct {\n\tHeader           *InternalHeader `protobuf:\"bytes,3,opt,name=header\" json:\"header,omitempty\"`\n\tTransaction      *Transaction    `protobuf:\"bytes,1,req,name=transaction\" json:\"transaction,omitempty\"`\n\tAction           []*Action       `protobuf:\"bytes,2,rep,name=action\" json:\"action,omitempty\"`\n\tXXX_unrecognized []byte          `json:\"-\"`\n}\n\nfunc (m *AddActionsRequest) Reset()         { *m = AddActionsRequest{} }\nfunc (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }\nfunc (*AddActionsRequest) ProtoMessage()    {}\n\nfunc (m *AddActionsRequest) GetHeader() *InternalHeader {\n\tif m != nil {\n\t\treturn m.Header\n\t}\n\treturn nil\n}\n\nfunc (m *AddActionsRequest) GetTransaction() *Transaction {\n\tif m != nil {\n\t\treturn m.Transaction\n\t}\n\treturn nil\n}\n\nfunc (m *AddActionsRequest) GetAction() []*Action {\n\tif m != nil {\n\t\treturn m.Action\n\t}\n\treturn nil\n}\n\ntype AddActionsResponse struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *AddActionsResponse) Reset()         { *m = AddActionsResponse{} }\nfunc (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }\nfunc (*AddActionsResponse) ProtoMessage()    {}\n\ntype BeginTransactionRequest struct {\n\tHeader           *InternalHeader `protobuf:\"bytes,3,opt,name=header\" json:\"header,omitempty\"`\n\tApp              *string         `protobuf:\"bytes,1,req,name=app\" json:\"app,omitempty\"`\n\tAllowMultipleEg  *bool           `protobuf:\"varint,2,opt,name=allow_multiple_eg,def=0\" json:\"allow_multiple_eg,omitempty\"`\n\tXXX_unrecognized []byte       
   `json:\"-\"`\n}\n\nfunc (m *BeginTransactionRequest) Reset()         { *m = BeginTransactionRequest{} }\nfunc (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }\nfunc (*BeginTransactionRequest) ProtoMessage()    {}\n\nconst Default_BeginTransactionRequest_AllowMultipleEg bool = false\n\nfunc (m *BeginTransactionRequest) GetHeader() *InternalHeader {\n\tif m != nil {\n\t\treturn m.Header\n\t}\n\treturn nil\n}\n\nfunc (m *BeginTransactionRequest) GetApp() string {\n\tif m != nil && m.App != nil {\n\t\treturn *m.App\n\t}\n\treturn \"\"\n}\n\nfunc (m *BeginTransactionRequest) GetAllowMultipleEg() bool {\n\tif m != nil && m.AllowMultipleEg != nil {\n\t\treturn *m.AllowMultipleEg\n\t}\n\treturn Default_BeginTransactionRequest_AllowMultipleEg\n}\n\ntype CommitResponse struct {\n\tCost             *Cost                     `protobuf:\"bytes,1,opt,name=cost\" json:\"cost,omitempty\"`\n\tVersion          []*CommitResponse_Version `protobuf:\"group,3,rep,name=Version\" json:\"version,omitempty\"`\n\tXXX_unrecognized []byte                    `json:\"-\"`\n}\n\nfunc (m *CommitResponse) Reset()         { *m = CommitResponse{} }\nfunc (m *CommitResponse) String() string { return proto.CompactTextString(m) }\nfunc (*CommitResponse) ProtoMessage()    {}\n\nfunc (m *CommitResponse) GetCost() *Cost {\n\tif m != nil {\n\t\treturn m.Cost\n\t}\n\treturn nil\n}\n\nfunc (m *CommitResponse) GetVersion() []*CommitResponse_Version {\n\tif m != nil {\n\t\treturn m.Version\n\t}\n\treturn nil\n}\n\ntype CommitResponse_Version struct {\n\tRootEntityKey    *Reference `protobuf:\"bytes,4,req,name=root_entity_key\" json:\"root_entity_key,omitempty\"`\n\tVersion          *int64     `protobuf:\"varint,5,req,name=version\" json:\"version,omitempty\"`\n\tXXX_unrecognized []byte     `json:\"-\"`\n}\n\nfunc (m *CommitResponse_Version) Reset()         { *m = CommitResponse_Version{} }\nfunc (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) 
}\nfunc (*CommitResponse_Version) ProtoMessage()    {}\n\nfunc (m *CommitResponse_Version) GetRootEntityKey() *Reference {\n\tif m != nil {\n\t\treturn m.RootEntityKey\n\t}\n\treturn nil\n}\n\nfunc (m *CommitResponse_Version) GetVersion() int64 {\n\tif m != nil && m.Version != nil {\n\t\treturn *m.Version\n\t}\n\treturn 0\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"datastore\";\n\npackage appengine;\n\nmessage Action{}\n\nmessage PropertyValue {\n  optional int64 int64Value = 1;\n  optional bool booleanValue = 2;\n  optional string stringValue = 3;\n  optional double doubleValue = 4;\n\n  optional group PointValue = 5 {\n    required double x = 6;\n    required double y = 7;\n  }\n\n  optional group UserValue = 8 {\n    required string email = 9;\n    required string auth_domain = 10;\n    optional string nickname = 11;\n    optional string federated_identity = 21;\n    optional string federated_provider = 22;\n  }\n\n  optional group ReferenceValue = 12 {\n    required string app = 13;\n    optional string name_space = 20;\n    repeated group PathElement = 14 {\n      required string type = 15;\n      optional int64 id = 16;\n      optional string name = 17;\n    }\n  }\n}\n\nmessage Property {\n  enum Meaning {\n    NO_MEANING = 0;\n    BLOB = 14;\n    TEXT = 15;\n    BYTESTRING = 16;\n\n    ATOM_CATEGORY = 1;\n    ATOM_LINK = 2;\n    ATOM_TITLE = 3;\n    ATOM_CONTENT = 4;\n    ATOM_SUMMARY = 5;\n    ATOM_AUTHOR = 6;\n\n    GD_WHEN = 7;\n    GD_EMAIL = 8;\n    GEORSS_POINT = 9;\n    GD_IM = 10;\n\n    GD_PHONENUMBER = 11;\n    GD_POSTALADDRESS = 12;\n\n    GD_RATING = 13;\n\n    BLOBKEY = 17;\n    ENTITY_PROTO = 19;\n\n    INDEX_VALUE = 18;\n  };\n\n  optional Meaning meaning = 1 [default = NO_MEANING];\n  optional string meaning_uri = 2;\n\n  required string name = 3;\n\n  required PropertyValue value = 5;\n\n  required bool multiple = 4;\n\n  optional bool searchable = 6 [default=false];\n\n  enum FtsTokenizationOption {\n    HTML = 1;\n    ATOM = 2;\n  }\n\n  optional FtsTokenizationOption fts_tokenization_option = 8;\n\n  optional string locale = 9 [default = \"en\"];\n}\n\nmessage Path {\n  repeated group Element = 1 {\n    required string type = 2;\n    optional int64 id = 3;\n    optional string name = 4;\n  }\n}\n\nmessage Reference {\n  required string app 
= 13;\n  optional string name_space = 20;\n  required Path path = 14;\n}\n\nmessage User {\n  required string email = 1;\n  required string auth_domain = 2;\n  optional string nickname = 3;\n  optional string federated_identity = 6;\n  optional string federated_provider = 7;\n}\n\nmessage EntityProto {\n  required Reference key = 13;\n  required Path entity_group = 16;\n  optional User owner = 17;\n\n  enum Kind {\n    GD_CONTACT = 1;\n    GD_EVENT = 2;\n    GD_MESSAGE = 3;\n  }\n  optional Kind kind = 4;\n  optional string kind_uri = 5;\n\n  repeated Property property = 14;\n  repeated Property raw_property = 15;\n\n  optional int32 rank = 18;\n}\n\nmessage CompositeProperty {\n  required int64 index_id = 1;\n  repeated string value = 2;\n}\n\nmessage Index {\n  required string entity_type = 1;\n  required bool ancestor = 5;\n  repeated group Property = 2 {\n    required string name = 3;\n    enum Direction {\n      ASCENDING = 1;\n      DESCENDING = 2;\n    }\n    optional Direction direction = 4 [default = ASCENDING];\n  }\n}\n\nmessage CompositeIndex {\n  required string app_id = 1;\n  required int64 id = 2;\n  required Index definition = 3;\n\n  enum State {\n    WRITE_ONLY = 1;\n    READ_WRITE = 2;\n    DELETED = 3;\n    ERROR = 4;\n  }\n  required State state = 4;\n\n  optional bool only_use_if_required = 6 [default = false];\n}\n\nmessage IndexPostfix {\n  message IndexValue {\n    required string property_name = 1;\n    required PropertyValue value = 2;\n  }\n\n  repeated IndexValue index_value = 1;\n\n  optional Reference key = 2;\n\n  optional bool before = 3 [default=true];\n}\n\nmessage IndexPosition {\n  optional string key = 1;\n\n  optional bool before = 2 [default=true];\n}\n\nmessage Snapshot {\n  enum Status {\n    INACTIVE = 0;\n    ACTIVE = 1;\n  }\n\n  required int64 ts = 1;\n}\n\nmessage InternalHeader {\n  optional string qos = 1;\n}\n\nmessage Transaction {\n  optional InternalHeader header = 4;\n  required fixed64 handle = 1;\n  required 
string app = 2;\n  optional bool mark_changes = 3 [default = false];\n}\n\nmessage Query {\n  optional InternalHeader header = 39;\n\n  required string app = 1;\n  optional string name_space = 29;\n\n  optional string kind = 3;\n  optional Reference ancestor = 17;\n\n  repeated group Filter = 4 {\n    enum Operator {\n      LESS_THAN = 1;\n      LESS_THAN_OR_EQUAL = 2;\n      GREATER_THAN = 3;\n      GREATER_THAN_OR_EQUAL = 4;\n      EQUAL = 5;\n      IN = 6;\n      EXISTS = 7;\n    }\n\n    required Operator op = 6;\n    repeated Property property = 14;\n  }\n\n  optional string search_query = 8;\n\n  repeated group Order = 9 {\n    enum Direction {\n      ASCENDING = 1;\n      DESCENDING = 2;\n    }\n\n    required string property = 10;\n    optional Direction direction = 11 [default = ASCENDING];\n  }\n\n  enum Hint {\n    ORDER_FIRST = 1;\n    ANCESTOR_FIRST = 2;\n    FILTER_FIRST = 3;\n  }\n  optional Hint hint = 18;\n\n  optional int32 count = 23;\n\n  optional int32 offset = 12 [default = 0];\n\n  optional int32 limit = 16;\n\n  optional CompiledCursor compiled_cursor = 30;\n  optional CompiledCursor end_compiled_cursor = 31;\n\n  repeated CompositeIndex composite_index = 19;\n\n  optional bool require_perfect_plan = 20 [default = false];\n\n  optional bool keys_only = 21 [default = false];\n\n  optional Transaction transaction = 22;\n\n  optional bool compile = 25 [default = false];\n\n  optional int64 failover_ms = 26;\n\n  optional bool strong = 32;\n\n  repeated string property_name = 33;\n\n  repeated string group_by_property_name = 34;\n\n  optional bool distinct = 24;\n\n  optional int64 min_safe_time_seconds = 35;\n\n  repeated string safe_replica_name = 36;\n\n  optional bool persist_offset = 37 [default=false];\n}\n\nmessage CompiledQuery {\n  required group PrimaryScan = 1 {\n    optional string index_name = 2;\n\n    optional string start_key = 3;\n    optional bool start_inclusive = 4;\n    optional string end_key = 5;\n    optional bool 
end_inclusive = 6;\n\n    repeated string start_postfix_value = 22;\n    repeated string end_postfix_value = 23;\n\n    optional int64 end_unapplied_log_timestamp_us = 19;\n  }\n\n  repeated group MergeJoinScan = 7 {\n    required string index_name = 8;\n\n    repeated string prefix_value = 9;\n\n    optional bool value_prefix = 20 [default=false];\n  }\n\n  optional Index index_def = 21;\n\n  optional int32 offset = 10 [default = 0];\n\n  optional int32 limit = 11;\n\n  required bool keys_only = 12;\n\n  repeated string property_name = 24;\n\n  optional int32 distinct_infix_size = 25;\n\n  optional group EntityFilter = 13 {\n    optional bool distinct = 14 [default=false];\n\n    optional string kind = 17;\n    optional Reference ancestor = 18;\n  }\n}\n\nmessage CompiledCursor {\n  optional group Position = 2 {\n    optional string start_key = 27;\n\n    repeated group IndexValue = 29 {\n      optional string property = 30;\n      required PropertyValue value = 31;\n    }\n\n    optional Reference key = 32;\n\n    optional bool start_inclusive = 28 [default=true];\n  }\n}\n\nmessage Cursor {\n  required fixed64 cursor = 1;\n\n  optional string app = 2;\n}\n\nmessage Error {\n  enum ErrorCode {\n    BAD_REQUEST = 1;\n    CONCURRENT_TRANSACTION = 2;\n    INTERNAL_ERROR = 3;\n    NEED_INDEX = 4;\n    TIMEOUT = 5;\n    PERMISSION_DENIED = 6;\n    BIGTABLE_ERROR = 7;\n    COMMITTED_BUT_STILL_APPLYING = 8;\n    CAPABILITY_DISABLED = 9;\n    TRY_ALTERNATE_BACKEND = 10;\n    SAFE_TIME_TOO_OLD = 11;\n  }\n}\n\nmessage Cost {\n  optional int32 index_writes = 1;\n  optional int32 index_write_bytes = 2;\n  optional int32 entity_writes = 3;\n  optional int32 entity_write_bytes = 4;\n  optional group CommitCost = 5 {\n    optional int32 requested_entity_puts = 6;\n    optional int32 requested_entity_deletes = 7;\n  };\n  optional int32 approximate_storage_delta = 8;\n  optional int32 id_sequence_updates = 9;\n}\n\nmessage GetRequest {\n  optional InternalHeader header = 6;\n\n 
 repeated Reference key = 1;\n  optional Transaction transaction = 2;\n\n  optional int64 failover_ms = 3;\n\n  optional bool strong = 4;\n\n  optional bool allow_deferred = 5 [default=false];\n}\n\nmessage GetResponse {\n  repeated group Entity = 1 {\n    optional EntityProto entity = 2;\n    optional Reference key = 4;\n\n    optional int64 version = 3;\n  }\n\n  repeated Reference deferred = 5;\n\n  optional bool in_order = 6 [default=true];\n}\n\nmessage PutRequest {\n  optional InternalHeader header = 11;\n\n  repeated EntityProto entity = 1;\n  optional Transaction transaction = 2;\n  repeated CompositeIndex composite_index = 3;\n\n  optional bool trusted = 4 [default = false];\n\n  optional bool force = 7 [default = false];\n\n  optional bool mark_changes = 8 [default = false];\n  repeated Snapshot snapshot = 9;\n\n  enum AutoIdPolicy {\n    CURRENT = 0;\n    SEQUENTIAL = 1;\n  }\n  optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];\n}\n\nmessage PutResponse {\n  repeated Reference key = 1;\n  optional Cost cost = 2;\n  repeated int64 version = 3;\n}\n\nmessage TouchRequest {\n  optional InternalHeader header = 10;\n\n  repeated Reference key = 1;\n  repeated CompositeIndex composite_index = 2;\n  optional bool force = 3 [default = false];\n  repeated Snapshot snapshot = 9;\n}\n\nmessage TouchResponse {\n  optional Cost cost = 1;\n}\n\nmessage DeleteRequest {\n  optional InternalHeader header = 10;\n\n  repeated Reference key = 6;\n  optional Transaction transaction = 5;\n\n  optional bool trusted = 4 [default = false];\n\n  optional bool force = 7 [default = false];\n\n  optional bool mark_changes = 8 [default = false];\n  repeated Snapshot snapshot = 9;\n}\n\nmessage DeleteResponse {\n  optional Cost cost = 1;\n  repeated int64 version = 3;\n}\n\nmessage NextRequest {\n  optional InternalHeader header = 5;\n\n  required Cursor cursor = 1;\n  optional int32 count = 2;\n\n  optional int32 offset = 4 [default = 0];\n\n  optional bool compile = 3 
[default = false];\n}\n\nmessage QueryResult {\n  optional Cursor cursor = 1;\n\n  repeated EntityProto result = 2;\n\n  optional int32 skipped_results = 7;\n\n  required bool more_results = 3;\n\n  optional bool keys_only = 4;\n\n  optional bool index_only = 9;\n\n  optional bool small_ops = 10;\n\n  optional CompiledQuery compiled_query = 5;\n\n  optional CompiledCursor compiled_cursor = 6;\n\n  repeated CompositeIndex index = 8;\n\n  repeated int64 version = 11;\n}\n\nmessage AllocateIdsRequest {\n  optional InternalHeader header = 4;\n\n  optional Reference model_key = 1;\n\n  optional int64 size = 2;\n\n  optional int64 max = 3;\n\n  repeated Reference reserve = 5;\n}\n\nmessage AllocateIdsResponse {\n  required int64 start = 1;\n  required int64 end = 2;\n  optional Cost cost = 3;\n}\n\nmessage CompositeIndices {\n  repeated CompositeIndex index = 1;\n}\n\nmessage AddActionsRequest {\n  optional InternalHeader header = 3;\n\n  required Transaction transaction = 1;\n  repeated Action action = 2;\n}\n\nmessage AddActionsResponse {\n}\n\nmessage BeginTransactionRequest {\n  optional InternalHeader header = 3;\n\n  required string app = 1;\n  optional bool allow_multiple_eg = 2 [default = false];\n}\n\nmessage CommitResponse {\n  optional Cost cost = 1;\n\n  repeated group Version = 3 {\n    required Reference root_entity_key = 4;\n    required int64 version = 5;\n  }\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/identity.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage internal\n\nimport netcontext \"golang.org/x/net/context\"\n\n// These functions are implementations of the wrapper functions\n// in ../appengine/identity.go. See that file for commentary.\n\nfunc AppID(c netcontext.Context) string {\n\treturn appID(FullyQualifiedAppID(c))\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/identity_classic.go",
    "content": "// Copyright 2015 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// +build appengine\n\npackage internal\n\nimport (\n\t\"appengine\"\n\n\tnetcontext \"golang.org/x/net/context\"\n)\n\nfunc DefaultVersionHostname(ctx netcontext.Context) string {\n\treturn appengine.DefaultVersionHostname(fromContext(ctx))\n}\n\nfunc RequestID(ctx netcontext.Context) string  { return appengine.RequestID(fromContext(ctx)) }\nfunc Datacenter(_ netcontext.Context) string   { return appengine.Datacenter() }\nfunc ServerSoftware() string                   { return appengine.ServerSoftware() }\nfunc ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) }\nfunc VersionID(ctx netcontext.Context) string  { return appengine.VersionID(fromContext(ctx)) }\nfunc InstanceID() string                       { return appengine.InstanceID() }\nfunc IsDevAppServer() bool                     { return appengine.IsDevAppServer() }\n\nfunc fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() }\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/identity_vm.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// +build !appengine\n\npackage internal\n\nimport (\n\t\"net/http\"\n\t\"os\"\n\n\tnetcontext \"golang.org/x/net/context\"\n)\n\n// These functions are implementations of the wrapper functions\n// in ../appengine/identity.go. See that file for commentary.\n\nconst (\n\thDefaultVersionHostname = \"X-AppEngine-Default-Version-Hostname\"\n\thRequestLogId           = \"X-AppEngine-Request-Log-Id\"\n\thDatacenter             = \"X-AppEngine-Datacenter\"\n)\n\nfunc ctxHeaders(ctx netcontext.Context) http.Header {\n\treturn fromContext(ctx).Request().Header\n}\n\nfunc DefaultVersionHostname(ctx netcontext.Context) string {\n\treturn ctxHeaders(ctx).Get(hDefaultVersionHostname)\n}\n\nfunc RequestID(ctx netcontext.Context) string {\n\treturn ctxHeaders(ctx).Get(hRequestLogId)\n}\n\nfunc Datacenter(ctx netcontext.Context) string {\n\treturn ctxHeaders(ctx).Get(hDatacenter)\n}\n\nfunc ServerSoftware() string {\n\t// TODO(dsymonds): Remove fallback when we've verified this.\n\tif s := os.Getenv(\"SERVER_SOFTWARE\"); s != \"\" {\n\t\treturn s\n\t}\n\treturn \"Google App Engine/1.x.x\"\n}\n\n// TODO(dsymonds): Remove the metadata fetches.\n\nfunc ModuleName(_ netcontext.Context) string {\n\tif s := os.Getenv(\"GAE_MODULE_NAME\"); s != \"\" {\n\t\treturn s\n\t}\n\treturn string(mustGetMetadata(\"instance/attributes/gae_backend_name\"))\n}\n\nfunc VersionID(_ netcontext.Context) string {\n\tif s1, s2 := os.Getenv(\"GAE_MODULE_VERSION\"), os.Getenv(\"GAE_MINOR_VERSION\"); s1 != \"\" && s2 != \"\" {\n\t\treturn s1 + \".\" + s2\n\t}\n\treturn string(mustGetMetadata(\"instance/attributes/gae_backend_version\")) + \".\" + string(mustGetMetadata(\"instance/attributes/gae_backend_minor_version\"))\n}\n\nfunc InstanceID() string {\n\tif s := os.Getenv(\"GAE_MODULE_INSTANCE\"); s != \"\" {\n\t\treturn 
s\n\t}\n\treturn string(mustGetMetadata(\"instance/attributes/gae_backend_instance\"))\n}\n\nfunc partitionlessAppID() string {\n\t// gae_project has everything except the partition prefix.\n\tappID := os.Getenv(\"GAE_LONG_APP_ID\")\n\tif appID == \"\" {\n\t\tappID = string(mustGetMetadata(\"instance/attributes/gae_project\"))\n\t}\n\treturn appID\n}\n\nfunc fullyQualifiedAppID(_ netcontext.Context) string {\n\tappID := partitionlessAppID()\n\n\tpart := os.Getenv(\"GAE_PARTITION\")\n\tif part == \"\" {\n\t\tpart = string(mustGetMetadata(\"instance/attributes/gae_partition\"))\n\t}\n\n\tif part != \"\" {\n\t\tappID = part + \"~\" + appID\n\t}\n\treturn appID\n}\n\nfunc IsDevAppServer() bool {\n\treturn os.Getenv(\"RUN_WITH_DEVAPPSERVER\") != \"\"\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/image/images_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/image/images_service.proto\n// DO NOT EDIT!\n\n/*\nPackage image is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/image/images_service.proto\n\nIt has these top-level messages:\n\tImagesServiceError\n\tImagesServiceTransform\n\tTransform\n\tImageData\n\tInputSettings\n\tOutputSettings\n\tImagesTransformRequest\n\tImagesTransformResponse\n\tCompositeImageOptions\n\tImagesCanvas\n\tImagesCompositeRequest\n\tImagesCompositeResponse\n\tImagesHistogramRequest\n\tImagesHistogram\n\tImagesHistogramResponse\n\tImagesGetUrlBaseRequest\n\tImagesGetUrlBaseResponse\n\tImagesDeleteUrlBaseRequest\n\tImagesDeleteUrlBaseResponse\n*/\npackage image\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype ImagesServiceError_ErrorCode int32\n\nconst (\n\tImagesServiceError_UNSPECIFIED_ERROR  ImagesServiceError_ErrorCode = 1\n\tImagesServiceError_BAD_TRANSFORM_DATA ImagesServiceError_ErrorCode = 2\n\tImagesServiceError_NOT_IMAGE          ImagesServiceError_ErrorCode = 3\n\tImagesServiceError_BAD_IMAGE_DATA     ImagesServiceError_ErrorCode = 4\n\tImagesServiceError_IMAGE_TOO_LARGE    ImagesServiceError_ErrorCode = 5\n\tImagesServiceError_INVALID_BLOB_KEY   ImagesServiceError_ErrorCode = 6\n\tImagesServiceError_ACCESS_DENIED      ImagesServiceError_ErrorCode = 7\n\tImagesServiceError_OBJECT_NOT_FOUND   ImagesServiceError_ErrorCode = 8\n)\n\nvar ImagesServiceError_ErrorCode_name = map[int32]string{\n\t1: \"UNSPECIFIED_ERROR\",\n\t2: \"BAD_TRANSFORM_DATA\",\n\t3: \"NOT_IMAGE\",\n\t4: \"BAD_IMAGE_DATA\",\n\t5: \"IMAGE_TOO_LARGE\",\n\t6: \"INVALID_BLOB_KEY\",\n\t7: \"ACCESS_DENIED\",\n\t8: \"OBJECT_NOT_FOUND\",\n}\nvar 
ImagesServiceError_ErrorCode_value = map[string]int32{\n\t\"UNSPECIFIED_ERROR\":  1,\n\t\"BAD_TRANSFORM_DATA\": 2,\n\t\"NOT_IMAGE\":          3,\n\t\"BAD_IMAGE_DATA\":     4,\n\t\"IMAGE_TOO_LARGE\":    5,\n\t\"INVALID_BLOB_KEY\":   6,\n\t\"ACCESS_DENIED\":      7,\n\t\"OBJECT_NOT_FOUND\":   8,\n}\n\nfunc (x ImagesServiceError_ErrorCode) Enum() *ImagesServiceError_ErrorCode {\n\tp := new(ImagesServiceError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x ImagesServiceError_ErrorCode) String() string {\n\treturn proto.EnumName(ImagesServiceError_ErrorCode_name, int32(x))\n}\nfunc (x *ImagesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(ImagesServiceError_ErrorCode_value, data, \"ImagesServiceError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = ImagesServiceError_ErrorCode(value)\n\treturn nil\n}\n\ntype ImagesServiceTransform_Type int32\n\nconst (\n\tImagesServiceTransform_RESIZE           ImagesServiceTransform_Type = 1\n\tImagesServiceTransform_ROTATE           ImagesServiceTransform_Type = 2\n\tImagesServiceTransform_HORIZONTAL_FLIP  ImagesServiceTransform_Type = 3\n\tImagesServiceTransform_VERTICAL_FLIP    ImagesServiceTransform_Type = 4\n\tImagesServiceTransform_CROP             ImagesServiceTransform_Type = 5\n\tImagesServiceTransform_IM_FEELING_LUCKY ImagesServiceTransform_Type = 6\n)\n\nvar ImagesServiceTransform_Type_name = map[int32]string{\n\t1: \"RESIZE\",\n\t2: \"ROTATE\",\n\t3: \"HORIZONTAL_FLIP\",\n\t4: \"VERTICAL_FLIP\",\n\t5: \"CROP\",\n\t6: \"IM_FEELING_LUCKY\",\n}\nvar ImagesServiceTransform_Type_value = map[string]int32{\n\t\"RESIZE\":           1,\n\t\"ROTATE\":           2,\n\t\"HORIZONTAL_FLIP\":  3,\n\t\"VERTICAL_FLIP\":    4,\n\t\"CROP\":             5,\n\t\"IM_FEELING_LUCKY\": 6,\n}\n\nfunc (x ImagesServiceTransform_Type) Enum() *ImagesServiceTransform_Type {\n\tp := new(ImagesServiceTransform_Type)\n\t*p = x\n\treturn p\n}\nfunc (x ImagesServiceTransform_Type) String() string 
{\n\treturn proto.EnumName(ImagesServiceTransform_Type_name, int32(x))\n}\nfunc (x *ImagesServiceTransform_Type) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(ImagesServiceTransform_Type_value, data, \"ImagesServiceTransform_Type\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = ImagesServiceTransform_Type(value)\n\treturn nil\n}\n\ntype InputSettings_ORIENTATION_CORRECTION_TYPE int32\n\nconst (\n\tInputSettings_UNCHANGED_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 0\n\tInputSettings_CORRECT_ORIENTATION   InputSettings_ORIENTATION_CORRECTION_TYPE = 1\n)\n\nvar InputSettings_ORIENTATION_CORRECTION_TYPE_name = map[int32]string{\n\t0: \"UNCHANGED_ORIENTATION\",\n\t1: \"CORRECT_ORIENTATION\",\n}\nvar InputSettings_ORIENTATION_CORRECTION_TYPE_value = map[string]int32{\n\t\"UNCHANGED_ORIENTATION\": 0,\n\t\"CORRECT_ORIENTATION\":   1,\n}\n\nfunc (x InputSettings_ORIENTATION_CORRECTION_TYPE) Enum() *InputSettings_ORIENTATION_CORRECTION_TYPE {\n\tp := new(InputSettings_ORIENTATION_CORRECTION_TYPE)\n\t*p = x\n\treturn p\n}\nfunc (x InputSettings_ORIENTATION_CORRECTION_TYPE) String() string {\n\treturn proto.EnumName(InputSettings_ORIENTATION_CORRECTION_TYPE_name, int32(x))\n}\nfunc (x *InputSettings_ORIENTATION_CORRECTION_TYPE) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(InputSettings_ORIENTATION_CORRECTION_TYPE_value, data, \"InputSettings_ORIENTATION_CORRECTION_TYPE\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = InputSettings_ORIENTATION_CORRECTION_TYPE(value)\n\treturn nil\n}\n\ntype OutputSettings_MIME_TYPE int32\n\nconst (\n\tOutputSettings_PNG  OutputSettings_MIME_TYPE = 0\n\tOutputSettings_JPEG OutputSettings_MIME_TYPE = 1\n\tOutputSettings_WEBP OutputSettings_MIME_TYPE = 2\n)\n\nvar OutputSettings_MIME_TYPE_name = map[int32]string{\n\t0: \"PNG\",\n\t1: \"JPEG\",\n\t2: \"WEBP\",\n}\nvar OutputSettings_MIME_TYPE_value = map[string]int32{\n\t\"PNG\":  0,\n\t\"JPEG\": 1,\n\t\"WEBP\": 
2,\n}\n\nfunc (x OutputSettings_MIME_TYPE) Enum() *OutputSettings_MIME_TYPE {\n\tp := new(OutputSettings_MIME_TYPE)\n\t*p = x\n\treturn p\n}\nfunc (x OutputSettings_MIME_TYPE) String() string {\n\treturn proto.EnumName(OutputSettings_MIME_TYPE_name, int32(x))\n}\nfunc (x *OutputSettings_MIME_TYPE) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(OutputSettings_MIME_TYPE_value, data, \"OutputSettings_MIME_TYPE\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = OutputSettings_MIME_TYPE(value)\n\treturn nil\n}\n\ntype CompositeImageOptions_ANCHOR int32\n\nconst (\n\tCompositeImageOptions_TOP_LEFT     CompositeImageOptions_ANCHOR = 0\n\tCompositeImageOptions_TOP          CompositeImageOptions_ANCHOR = 1\n\tCompositeImageOptions_TOP_RIGHT    CompositeImageOptions_ANCHOR = 2\n\tCompositeImageOptions_LEFT         CompositeImageOptions_ANCHOR = 3\n\tCompositeImageOptions_CENTER       CompositeImageOptions_ANCHOR = 4\n\tCompositeImageOptions_RIGHT        CompositeImageOptions_ANCHOR = 5\n\tCompositeImageOptions_BOTTOM_LEFT  CompositeImageOptions_ANCHOR = 6\n\tCompositeImageOptions_BOTTOM       CompositeImageOptions_ANCHOR = 7\n\tCompositeImageOptions_BOTTOM_RIGHT CompositeImageOptions_ANCHOR = 8\n)\n\nvar CompositeImageOptions_ANCHOR_name = map[int32]string{\n\t0: \"TOP_LEFT\",\n\t1: \"TOP\",\n\t2: \"TOP_RIGHT\",\n\t3: \"LEFT\",\n\t4: \"CENTER\",\n\t5: \"RIGHT\",\n\t6: \"BOTTOM_LEFT\",\n\t7: \"BOTTOM\",\n\t8: \"BOTTOM_RIGHT\",\n}\nvar CompositeImageOptions_ANCHOR_value = map[string]int32{\n\t\"TOP_LEFT\":     0,\n\t\"TOP\":          1,\n\t\"TOP_RIGHT\":    2,\n\t\"LEFT\":         3,\n\t\"CENTER\":       4,\n\t\"RIGHT\":        5,\n\t\"BOTTOM_LEFT\":  6,\n\t\"BOTTOM\":       7,\n\t\"BOTTOM_RIGHT\": 8,\n}\n\nfunc (x CompositeImageOptions_ANCHOR) Enum() *CompositeImageOptions_ANCHOR {\n\tp := new(CompositeImageOptions_ANCHOR)\n\t*p = x\n\treturn p\n}\nfunc (x CompositeImageOptions_ANCHOR) String() string {\n\treturn 
proto.EnumName(CompositeImageOptions_ANCHOR_name, int32(x))\n}\nfunc (x *CompositeImageOptions_ANCHOR) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(CompositeImageOptions_ANCHOR_value, data, \"CompositeImageOptions_ANCHOR\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CompositeImageOptions_ANCHOR(value)\n\treturn nil\n}\n\ntype ImagesServiceError struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *ImagesServiceError) Reset()         { *m = ImagesServiceError{} }\nfunc (m *ImagesServiceError) String() string { return proto.CompactTextString(m) }\nfunc (*ImagesServiceError) ProtoMessage()    {}\n\ntype ImagesServiceTransform struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *ImagesServiceTransform) Reset()         { *m = ImagesServiceTransform{} }\nfunc (m *ImagesServiceTransform) String() string { return proto.CompactTextString(m) }\nfunc (*ImagesServiceTransform) ProtoMessage()    {}\n\ntype Transform struct {\n\tWidth            *int32   `protobuf:\"varint,1,opt,name=width\" json:\"width,omitempty\"`\n\tHeight           *int32   `protobuf:\"varint,2,opt,name=height\" json:\"height,omitempty\"`\n\tCropToFit        *bool    `protobuf:\"varint,11,opt,name=crop_to_fit,def=0\" json:\"crop_to_fit,omitempty\"`\n\tCropOffsetX      *float32 `protobuf:\"fixed32,12,opt,name=crop_offset_x,def=0.5\" json:\"crop_offset_x,omitempty\"`\n\tCropOffsetY      *float32 `protobuf:\"fixed32,13,opt,name=crop_offset_y,def=0.5\" json:\"crop_offset_y,omitempty\"`\n\tRotate           *int32   `protobuf:\"varint,3,opt,name=rotate,def=0\" json:\"rotate,omitempty\"`\n\tHorizontalFlip   *bool    `protobuf:\"varint,4,opt,name=horizontal_flip,def=0\" json:\"horizontal_flip,omitempty\"`\n\tVerticalFlip     *bool    `protobuf:\"varint,5,opt,name=vertical_flip,def=0\" json:\"vertical_flip,omitempty\"`\n\tCropLeftX        *float32 `protobuf:\"fixed32,6,opt,name=crop_left_x,def=0\" json:\"crop_left_x,omitempty\"`\n\tCropTopY         *float32 
`protobuf:\"fixed32,7,opt,name=crop_top_y,def=0\" json:\"crop_top_y,omitempty\"`\n\tCropRightX       *float32 `protobuf:\"fixed32,8,opt,name=crop_right_x,def=1\" json:\"crop_right_x,omitempty\"`\n\tCropBottomY      *float32 `protobuf:\"fixed32,9,opt,name=crop_bottom_y,def=1\" json:\"crop_bottom_y,omitempty\"`\n\tAutolevels       *bool    `protobuf:\"varint,10,opt,name=autolevels,def=0\" json:\"autolevels,omitempty\"`\n\tAllowStretch     *bool    `protobuf:\"varint,14,opt,name=allow_stretch,def=0\" json:\"allow_stretch,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *Transform) Reset()         { *m = Transform{} }\nfunc (m *Transform) String() string { return proto.CompactTextString(m) }\nfunc (*Transform) ProtoMessage()    {}\n\nconst Default_Transform_CropToFit bool = false\nconst Default_Transform_CropOffsetX float32 = 0.5\nconst Default_Transform_CropOffsetY float32 = 0.5\nconst Default_Transform_Rotate int32 = 0\nconst Default_Transform_HorizontalFlip bool = false\nconst Default_Transform_VerticalFlip bool = false\nconst Default_Transform_CropLeftX float32 = 0\nconst Default_Transform_CropTopY float32 = 0\nconst Default_Transform_CropRightX float32 = 1\nconst Default_Transform_CropBottomY float32 = 1\nconst Default_Transform_Autolevels bool = false\nconst Default_Transform_AllowStretch bool = false\n\nfunc (m *Transform) GetWidth() int32 {\n\tif m != nil && m.Width != nil {\n\t\treturn *m.Width\n\t}\n\treturn 0\n}\n\nfunc (m *Transform) GetHeight() int32 {\n\tif m != nil && m.Height != nil {\n\t\treturn *m.Height\n\t}\n\treturn 0\n}\n\nfunc (m *Transform) GetCropToFit() bool {\n\tif m != nil && m.CropToFit != nil {\n\t\treturn *m.CropToFit\n\t}\n\treturn Default_Transform_CropToFit\n}\n\nfunc (m *Transform) GetCropOffsetX() float32 {\n\tif m != nil && m.CropOffsetX != nil {\n\t\treturn *m.CropOffsetX\n\t}\n\treturn Default_Transform_CropOffsetX\n}\n\nfunc (m *Transform) GetCropOffsetY() float32 {\n\tif m != nil && m.CropOffsetY != nil 
{\n\t\treturn *m.CropOffsetY\n\t}\n\treturn Default_Transform_CropOffsetY\n}\n\nfunc (m *Transform) GetRotate() int32 {\n\tif m != nil && m.Rotate != nil {\n\t\treturn *m.Rotate\n\t}\n\treturn Default_Transform_Rotate\n}\n\nfunc (m *Transform) GetHorizontalFlip() bool {\n\tif m != nil && m.HorizontalFlip != nil {\n\t\treturn *m.HorizontalFlip\n\t}\n\treturn Default_Transform_HorizontalFlip\n}\n\nfunc (m *Transform) GetVerticalFlip() bool {\n\tif m != nil && m.VerticalFlip != nil {\n\t\treturn *m.VerticalFlip\n\t}\n\treturn Default_Transform_VerticalFlip\n}\n\nfunc (m *Transform) GetCropLeftX() float32 {\n\tif m != nil && m.CropLeftX != nil {\n\t\treturn *m.CropLeftX\n\t}\n\treturn Default_Transform_CropLeftX\n}\n\nfunc (m *Transform) GetCropTopY() float32 {\n\tif m != nil && m.CropTopY != nil {\n\t\treturn *m.CropTopY\n\t}\n\treturn Default_Transform_CropTopY\n}\n\nfunc (m *Transform) GetCropRightX() float32 {\n\tif m != nil && m.CropRightX != nil {\n\t\treturn *m.CropRightX\n\t}\n\treturn Default_Transform_CropRightX\n}\n\nfunc (m *Transform) GetCropBottomY() float32 {\n\tif m != nil && m.CropBottomY != nil {\n\t\treturn *m.CropBottomY\n\t}\n\treturn Default_Transform_CropBottomY\n}\n\nfunc (m *Transform) GetAutolevels() bool {\n\tif m != nil && m.Autolevels != nil {\n\t\treturn *m.Autolevels\n\t}\n\treturn Default_Transform_Autolevels\n}\n\nfunc (m *Transform) GetAllowStretch() bool {\n\tif m != nil && m.AllowStretch != nil {\n\t\treturn *m.AllowStretch\n\t}\n\treturn Default_Transform_AllowStretch\n}\n\ntype ImageData struct {\n\tContent          []byte  `protobuf:\"bytes,1,req,name=content\" json:\"content,omitempty\"`\n\tBlobKey          *string `protobuf:\"bytes,2,opt,name=blob_key\" json:\"blob_key,omitempty\"`\n\tWidth            *int32  `protobuf:\"varint,3,opt,name=width\" json:\"width,omitempty\"`\n\tHeight           *int32  `protobuf:\"varint,4,opt,name=height\" json:\"height,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m 
*ImageData) Reset()         { *m = ImageData{} }\nfunc (m *ImageData) String() string { return proto.CompactTextString(m) }\nfunc (*ImageData) ProtoMessage()    {}\n\nfunc (m *ImageData) GetContent() []byte {\n\tif m != nil {\n\t\treturn m.Content\n\t}\n\treturn nil\n}\n\nfunc (m *ImageData) GetBlobKey() string {\n\tif m != nil && m.BlobKey != nil {\n\t\treturn *m.BlobKey\n\t}\n\treturn \"\"\n}\n\nfunc (m *ImageData) GetWidth() int32 {\n\tif m != nil && m.Width != nil {\n\t\treturn *m.Width\n\t}\n\treturn 0\n}\n\nfunc (m *ImageData) GetHeight() int32 {\n\tif m != nil && m.Height != nil {\n\t\treturn *m.Height\n\t}\n\treturn 0\n}\n\ntype InputSettings struct {\n\tCorrectExifOrientation     *InputSettings_ORIENTATION_CORRECTION_TYPE `protobuf:\"varint,1,opt,name=correct_exif_orientation,enum=appengine.InputSettings_ORIENTATION_CORRECTION_TYPE,def=0\" json:\"correct_exif_orientation,omitempty\"`\n\tParseMetadata              *bool                                      `protobuf:\"varint,2,opt,name=parse_metadata,def=0\" json:\"parse_metadata,omitempty\"`\n\tTransparentSubstitutionRgb *int32                                     `protobuf:\"varint,3,opt,name=transparent_substitution_rgb\" json:\"transparent_substitution_rgb,omitempty\"`\n\tXXX_unrecognized           []byte                                     `json:\"-\"`\n}\n\nfunc (m *InputSettings) Reset()         { *m = InputSettings{} }\nfunc (m *InputSettings) String() string { return proto.CompactTextString(m) }\nfunc (*InputSettings) ProtoMessage()    {}\n\nconst Default_InputSettings_CorrectExifOrientation InputSettings_ORIENTATION_CORRECTION_TYPE = InputSettings_UNCHANGED_ORIENTATION\nconst Default_InputSettings_ParseMetadata bool = false\n\nfunc (m *InputSettings) GetCorrectExifOrientation() InputSettings_ORIENTATION_CORRECTION_TYPE {\n\tif m != nil && m.CorrectExifOrientation != nil {\n\t\treturn *m.CorrectExifOrientation\n\t}\n\treturn Default_InputSettings_CorrectExifOrientation\n}\n\nfunc (m *InputSettings) 
GetParseMetadata() bool {\n\tif m != nil && m.ParseMetadata != nil {\n\t\treturn *m.ParseMetadata\n\t}\n\treturn Default_InputSettings_ParseMetadata\n}\n\nfunc (m *InputSettings) GetTransparentSubstitutionRgb() int32 {\n\tif m != nil && m.TransparentSubstitutionRgb != nil {\n\t\treturn *m.TransparentSubstitutionRgb\n\t}\n\treturn 0\n}\n\ntype OutputSettings struct {\n\tMimeType         *OutputSettings_MIME_TYPE `protobuf:\"varint,1,opt,name=mime_type,enum=appengine.OutputSettings_MIME_TYPE,def=0\" json:\"mime_type,omitempty\"`\n\tQuality          *int32                    `protobuf:\"varint,2,opt,name=quality\" json:\"quality,omitempty\"`\n\tXXX_unrecognized []byte                    `json:\"-\"`\n}\n\nfunc (m *OutputSettings) Reset()         { *m = OutputSettings{} }\nfunc (m *OutputSettings) String() string { return proto.CompactTextString(m) }\nfunc (*OutputSettings) ProtoMessage()    {}\n\nconst Default_OutputSettings_MimeType OutputSettings_MIME_TYPE = OutputSettings_PNG\n\nfunc (m *OutputSettings) GetMimeType() OutputSettings_MIME_TYPE {\n\tif m != nil && m.MimeType != nil {\n\t\treturn *m.MimeType\n\t}\n\treturn Default_OutputSettings_MimeType\n}\n\nfunc (m *OutputSettings) GetQuality() int32 {\n\tif m != nil && m.Quality != nil {\n\t\treturn *m.Quality\n\t}\n\treturn 0\n}\n\ntype ImagesTransformRequest struct {\n\tImage            *ImageData      `protobuf:\"bytes,1,req,name=image\" json:\"image,omitempty\"`\n\tTransform        []*Transform    `protobuf:\"bytes,2,rep,name=transform\" json:\"transform,omitempty\"`\n\tOutput           *OutputSettings `protobuf:\"bytes,3,req,name=output\" json:\"output,omitempty\"`\n\tInput            *InputSettings  `protobuf:\"bytes,4,opt,name=input\" json:\"input,omitempty\"`\n\tXXX_unrecognized []byte          `json:\"-\"`\n}\n\nfunc (m *ImagesTransformRequest) Reset()         { *m = ImagesTransformRequest{} }\nfunc (m *ImagesTransformRequest) String() string { return proto.CompactTextString(m) }\nfunc 
(*ImagesTransformRequest) ProtoMessage()    {}\n\nfunc (m *ImagesTransformRequest) GetImage() *ImageData {\n\tif m != nil {\n\t\treturn m.Image\n\t}\n\treturn nil\n}\n\nfunc (m *ImagesTransformRequest) GetTransform() []*Transform {\n\tif m != nil {\n\t\treturn m.Transform\n\t}\n\treturn nil\n}\n\nfunc (m *ImagesTransformRequest) GetOutput() *OutputSettings {\n\tif m != nil {\n\t\treturn m.Output\n\t}\n\treturn nil\n}\n\nfunc (m *ImagesTransformRequest) GetInput() *InputSettings {\n\tif m != nil {\n\t\treturn m.Input\n\t}\n\treturn nil\n}\n\ntype ImagesTransformResponse struct {\n\tImage            *ImageData `protobuf:\"bytes,1,req,name=image\" json:\"image,omitempty\"`\n\tSourceMetadata   *string    `protobuf:\"bytes,2,opt,name=source_metadata\" json:\"source_metadata,omitempty\"`\n\tXXX_unrecognized []byte     `json:\"-\"`\n}\n\nfunc (m *ImagesTransformResponse) Reset()         { *m = ImagesTransformResponse{} }\nfunc (m *ImagesTransformResponse) String() string { return proto.CompactTextString(m) }\nfunc (*ImagesTransformResponse) ProtoMessage()    {}\n\nfunc (m *ImagesTransformResponse) GetImage() *ImageData {\n\tif m != nil {\n\t\treturn m.Image\n\t}\n\treturn nil\n}\n\nfunc (m *ImagesTransformResponse) GetSourceMetadata() string {\n\tif m != nil && m.SourceMetadata != nil {\n\t\treturn *m.SourceMetadata\n\t}\n\treturn \"\"\n}\n\ntype CompositeImageOptions struct {\n\tSourceIndex      *int32                        `protobuf:\"varint,1,req,name=source_index\" json:\"source_index,omitempty\"`\n\tXOffset          *int32                        `protobuf:\"varint,2,req,name=x_offset\" json:\"x_offset,omitempty\"`\n\tYOffset          *int32                        `protobuf:\"varint,3,req,name=y_offset\" json:\"y_offset,omitempty\"`\n\tOpacity          *float32                      `protobuf:\"fixed32,4,req,name=opacity\" json:\"opacity,omitempty\"`\n\tAnchor           *CompositeImageOptions_ANCHOR 
`protobuf:\"varint,5,req,name=anchor,enum=appengine.CompositeImageOptions_ANCHOR\" json:\"anchor,omitempty\"`\n\tXXX_unrecognized []byte                        `json:\"-\"`\n}\n\nfunc (m *CompositeImageOptions) Reset()         { *m = CompositeImageOptions{} }\nfunc (m *CompositeImageOptions) String() string { return proto.CompactTextString(m) }\nfunc (*CompositeImageOptions) ProtoMessage()    {}\n\nfunc (m *CompositeImageOptions) GetSourceIndex() int32 {\n\tif m != nil && m.SourceIndex != nil {\n\t\treturn *m.SourceIndex\n\t}\n\treturn 0\n}\n\nfunc (m *CompositeImageOptions) GetXOffset() int32 {\n\tif m != nil && m.XOffset != nil {\n\t\treturn *m.XOffset\n\t}\n\treturn 0\n}\n\nfunc (m *CompositeImageOptions) GetYOffset() int32 {\n\tif m != nil && m.YOffset != nil {\n\t\treturn *m.YOffset\n\t}\n\treturn 0\n}\n\nfunc (m *CompositeImageOptions) GetOpacity() float32 {\n\tif m != nil && m.Opacity != nil {\n\t\treturn *m.Opacity\n\t}\n\treturn 0\n}\n\nfunc (m *CompositeImageOptions) GetAnchor() CompositeImageOptions_ANCHOR {\n\tif m != nil && m.Anchor != nil {\n\t\treturn *m.Anchor\n\t}\n\treturn CompositeImageOptions_TOP_LEFT\n}\n\ntype ImagesCanvas struct {\n\tWidth            *int32          `protobuf:\"varint,1,req,name=width\" json:\"width,omitempty\"`\n\tHeight           *int32          `protobuf:\"varint,2,req,name=height\" json:\"height,omitempty\"`\n\tOutput           *OutputSettings `protobuf:\"bytes,3,req,name=output\" json:\"output,omitempty\"`\n\tColor            *int32          `protobuf:\"varint,4,opt,name=color,def=-1\" json:\"color,omitempty\"`\n\tXXX_unrecognized []byte          `json:\"-\"`\n}\n\nfunc (m *ImagesCanvas) Reset()         { *m = ImagesCanvas{} }\nfunc (m *ImagesCanvas) String() string { return proto.CompactTextString(m) }\nfunc (*ImagesCanvas) ProtoMessage()    {}\n\nconst Default_ImagesCanvas_Color int32 = -1\n\nfunc (m *ImagesCanvas) GetWidth() int32 {\n\tif m != nil && m.Width != nil {\n\t\treturn *m.Width\n\t}\n\treturn 0\n}\n\nfunc (m 
*ImagesCanvas) GetHeight() int32 {\n\tif m != nil && m.Height != nil {\n\t\treturn *m.Height\n\t}\n\treturn 0\n}\n\nfunc (m *ImagesCanvas) GetOutput() *OutputSettings {\n\tif m != nil {\n\t\treturn m.Output\n\t}\n\treturn nil\n}\n\nfunc (m *ImagesCanvas) GetColor() int32 {\n\tif m != nil && m.Color != nil {\n\t\treturn *m.Color\n\t}\n\treturn Default_ImagesCanvas_Color\n}\n\ntype ImagesCompositeRequest struct {\n\tImage            []*ImageData             `protobuf:\"bytes,1,rep,name=image\" json:\"image,omitempty\"`\n\tOptions          []*CompositeImageOptions `protobuf:\"bytes,2,rep,name=options\" json:\"options,omitempty\"`\n\tCanvas           *ImagesCanvas            `protobuf:\"bytes,3,req,name=canvas\" json:\"canvas,omitempty\"`\n\tXXX_unrecognized []byte                   `json:\"-\"`\n}\n\nfunc (m *ImagesCompositeRequest) Reset()         { *m = ImagesCompositeRequest{} }\nfunc (m *ImagesCompositeRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ImagesCompositeRequest) ProtoMessage()    {}\n\nfunc (m *ImagesCompositeRequest) GetImage() []*ImageData {\n\tif m != nil {\n\t\treturn m.Image\n\t}\n\treturn nil\n}\n\nfunc (m *ImagesCompositeRequest) GetOptions() []*CompositeImageOptions {\n\tif m != nil {\n\t\treturn m.Options\n\t}\n\treturn nil\n}\n\nfunc (m *ImagesCompositeRequest) GetCanvas() *ImagesCanvas {\n\tif m != nil {\n\t\treturn m.Canvas\n\t}\n\treturn nil\n}\n\ntype ImagesCompositeResponse struct {\n\tImage            *ImageData `protobuf:\"bytes,1,req,name=image\" json:\"image,omitempty\"`\n\tXXX_unrecognized []byte     `json:\"-\"`\n}\n\nfunc (m *ImagesCompositeResponse) Reset()         { *m = ImagesCompositeResponse{} }\nfunc (m *ImagesCompositeResponse) String() string { return proto.CompactTextString(m) }\nfunc (*ImagesCompositeResponse) ProtoMessage()    {}\n\nfunc (m *ImagesCompositeResponse) GetImage() *ImageData {\n\tif m != nil {\n\t\treturn m.Image\n\t}\n\treturn nil\n}\n\ntype ImagesHistogramRequest struct {\n\tImage    
        *ImageData `protobuf:\"bytes,1,req,name=image\" json:\"image,omitempty\"`\n\tXXX_unrecognized []byte     `json:\"-\"`\n}\n\nfunc (m *ImagesHistogramRequest) Reset()         { *m = ImagesHistogramRequest{} }\nfunc (m *ImagesHistogramRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ImagesHistogramRequest) ProtoMessage()    {}\n\nfunc (m *ImagesHistogramRequest) GetImage() *ImageData {\n\tif m != nil {\n\t\treturn m.Image\n\t}\n\treturn nil\n}\n\ntype ImagesHistogram struct {\n\tRed              []int32 `protobuf:\"varint,1,rep,name=red\" json:\"red,omitempty\"`\n\tGreen            []int32 `protobuf:\"varint,2,rep,name=green\" json:\"green,omitempty\"`\n\tBlue             []int32 `protobuf:\"varint,3,rep,name=blue\" json:\"blue,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *ImagesHistogram) Reset()         { *m = ImagesHistogram{} }\nfunc (m *ImagesHistogram) String() string { return proto.CompactTextString(m) }\nfunc (*ImagesHistogram) ProtoMessage()    {}\n\nfunc (m *ImagesHistogram) GetRed() []int32 {\n\tif m != nil {\n\t\treturn m.Red\n\t}\n\treturn nil\n}\n\nfunc (m *ImagesHistogram) GetGreen() []int32 {\n\tif m != nil {\n\t\treturn m.Green\n\t}\n\treturn nil\n}\n\nfunc (m *ImagesHistogram) GetBlue() []int32 {\n\tif m != nil {\n\t\treturn m.Blue\n\t}\n\treturn nil\n}\n\ntype ImagesHistogramResponse struct {\n\tHistogram        *ImagesHistogram `protobuf:\"bytes,1,req,name=histogram\" json:\"histogram,omitempty\"`\n\tXXX_unrecognized []byte           `json:\"-\"`\n}\n\nfunc (m *ImagesHistogramResponse) Reset()         { *m = ImagesHistogramResponse{} }\nfunc (m *ImagesHistogramResponse) String() string { return proto.CompactTextString(m) }\nfunc (*ImagesHistogramResponse) ProtoMessage()    {}\n\nfunc (m *ImagesHistogramResponse) GetHistogram() *ImagesHistogram {\n\tif m != nil {\n\t\treturn m.Histogram\n\t}\n\treturn nil\n}\n\ntype ImagesGetUrlBaseRequest struct {\n\tBlobKey          *string 
`protobuf:\"bytes,1,req,name=blob_key\" json:\"blob_key,omitempty\"`\n\tCreateSecureUrl  *bool   `protobuf:\"varint,2,opt,name=create_secure_url,def=0\" json:\"create_secure_url,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *ImagesGetUrlBaseRequest) Reset()         { *m = ImagesGetUrlBaseRequest{} }\nfunc (m *ImagesGetUrlBaseRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ImagesGetUrlBaseRequest) ProtoMessage()    {}\n\nconst Default_ImagesGetUrlBaseRequest_CreateSecureUrl bool = false\n\nfunc (m *ImagesGetUrlBaseRequest) GetBlobKey() string {\n\tif m != nil && m.BlobKey != nil {\n\t\treturn *m.BlobKey\n\t}\n\treturn \"\"\n}\n\nfunc (m *ImagesGetUrlBaseRequest) GetCreateSecureUrl() bool {\n\tif m != nil && m.CreateSecureUrl != nil {\n\t\treturn *m.CreateSecureUrl\n\t}\n\treturn Default_ImagesGetUrlBaseRequest_CreateSecureUrl\n}\n\ntype ImagesGetUrlBaseResponse struct {\n\tUrl              *string `protobuf:\"bytes,1,req,name=url\" json:\"url,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *ImagesGetUrlBaseResponse) Reset()         { *m = ImagesGetUrlBaseResponse{} }\nfunc (m *ImagesGetUrlBaseResponse) String() string { return proto.CompactTextString(m) }\nfunc (*ImagesGetUrlBaseResponse) ProtoMessage()    {}\n\nfunc (m *ImagesGetUrlBaseResponse) GetUrl() string {\n\tif m != nil && m.Url != nil {\n\t\treturn *m.Url\n\t}\n\treturn \"\"\n}\n\ntype ImagesDeleteUrlBaseRequest struct {\n\tBlobKey          *string `protobuf:\"bytes,1,req,name=blob_key\" json:\"blob_key,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *ImagesDeleteUrlBaseRequest) Reset()         { *m = ImagesDeleteUrlBaseRequest{} }\nfunc (m *ImagesDeleteUrlBaseRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ImagesDeleteUrlBaseRequest) ProtoMessage()    {}\n\nfunc (m *ImagesDeleteUrlBaseRequest) GetBlobKey() string {\n\tif m != nil && m.BlobKey != nil {\n\t\treturn *m.BlobKey\n\t}\n\treturn 
\"\"\n}\n\ntype ImagesDeleteUrlBaseResponse struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *ImagesDeleteUrlBaseResponse) Reset()         { *m = ImagesDeleteUrlBaseResponse{} }\nfunc (m *ImagesDeleteUrlBaseResponse) String() string { return proto.CompactTextString(m) }\nfunc (*ImagesDeleteUrlBaseResponse) ProtoMessage()    {}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/image/images_service.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"image\";\n\npackage appengine;\n\nmessage ImagesServiceError {\n  enum ErrorCode {\n    UNSPECIFIED_ERROR = 1;\n    BAD_TRANSFORM_DATA = 2;\n    NOT_IMAGE = 3;\n    BAD_IMAGE_DATA = 4;\n    IMAGE_TOO_LARGE = 5;\n    INVALID_BLOB_KEY = 6;\n    ACCESS_DENIED = 7;\n    OBJECT_NOT_FOUND = 8;\n  }\n}\n\nmessage ImagesServiceTransform {\n  enum Type {\n    RESIZE = 1;\n    ROTATE = 2;\n    HORIZONTAL_FLIP = 3;\n    VERTICAL_FLIP = 4;\n    CROP = 5;\n    IM_FEELING_LUCKY = 6;\n  }\n}\n\nmessage Transform {\n  optional int32 width = 1;\n  optional int32 height = 2;\n  optional bool crop_to_fit = 11 [default = false];\n  optional float crop_offset_x = 12 [default = 0.5];\n  optional float crop_offset_y = 13 [default = 0.5];\n\n  optional int32 rotate = 3 [default = 0];\n\n  optional bool horizontal_flip = 4 [default = false];\n\n  optional bool vertical_flip = 5 [default = false];\n\n  optional float crop_left_x = 6 [default = 0.0];\n  optional float crop_top_y = 7 [default = 0.0];\n  optional float crop_right_x = 8 [default = 1.0];\n  optional float crop_bottom_y = 9 [default = 1.0];\n\n  optional bool autolevels = 10 [default = false];\n\n  optional bool allow_stretch = 14 [default = false];\n}\n\nmessage ImageData {\n  required bytes content = 1 [ctype=CORD];\n  optional string blob_key = 2;\n\n  optional int32 width = 3;\n  optional int32 height = 4;\n}\n\nmessage InputSettings {\n  enum ORIENTATION_CORRECTION_TYPE {\n    UNCHANGED_ORIENTATION = 0;\n    CORRECT_ORIENTATION = 1;\n  }\n  optional ORIENTATION_CORRECTION_TYPE correct_exif_orientation = 1\n      [default=UNCHANGED_ORIENTATION];\n  optional bool parse_metadata = 2 [default=false];\n  optional int32 transparent_substitution_rgb = 3;\n}\n\nmessage OutputSettings {\n  enum MIME_TYPE {\n    PNG = 0;\n    JPEG = 1;\n    WEBP = 2;\n  }\n\n  optional MIME_TYPE mime_type = 1 [default=PNG];\n  optional int32 quality = 2;\n}\n\nmessage ImagesTransformRequest 
{\n  required ImageData image = 1;\n  repeated Transform transform = 2;\n  required OutputSettings output = 3;\n  optional InputSettings input = 4;\n}\n\nmessage ImagesTransformResponse {\n  required ImageData image = 1;\n  optional string source_metadata = 2;\n}\n\nmessage CompositeImageOptions {\n  required int32 source_index = 1;\n  required int32 x_offset = 2;\n  required int32 y_offset = 3;\n  required float opacity = 4;\n\n  enum ANCHOR {\n    TOP_LEFT = 0;\n    TOP = 1;\n    TOP_RIGHT = 2;\n    LEFT = 3;\n    CENTER = 4;\n    RIGHT = 5;\n    BOTTOM_LEFT = 6;\n    BOTTOM = 7;\n    BOTTOM_RIGHT = 8;\n  }\n\n  required ANCHOR anchor = 5;\n}\n\nmessage ImagesCanvas {\n  required int32 width = 1;\n  required int32 height = 2;\n  required OutputSettings output = 3;\n  optional int32 color = 4 [default=-1];\n}\n\nmessage ImagesCompositeRequest {\n  repeated ImageData image = 1;\n  repeated CompositeImageOptions options = 2;\n  required ImagesCanvas canvas = 3;\n}\n\nmessage ImagesCompositeResponse {\n  required ImageData image = 1;\n}\n\nmessage ImagesHistogramRequest {\n  required ImageData image = 1;\n}\n\nmessage ImagesHistogram {\n  repeated int32 red = 1;\n  repeated int32 green = 2;\n  repeated int32 blue = 3;\n}\n\nmessage ImagesHistogramResponse {\n  required ImagesHistogram histogram = 1;\n}\n\nmessage ImagesGetUrlBaseRequest {\n  required string blob_key = 1;\n\n  optional bool create_secure_url = 2 [default = false];\n}\n\nmessage ImagesGetUrlBaseResponse {\n  required string url = 1;\n}\n\nmessage ImagesDeleteUrlBaseRequest {\n  required string blob_key = 1;\n}\n\nmessage ImagesDeleteUrlBaseResponse {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/internal.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// Package internal provides support for package appengine.\n//\n// Programs should not use this package directly. Its API is not stable.\n// Use packages appengine and appengine/* instead.\npackage internal\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\tremotepb \"google.golang.org/appengine/internal/remote_api\"\n)\n\n// errorCodeMaps is a map of service name to the error code map for the service.\nvar errorCodeMaps = make(map[string]map[int32]string)\n\n// RegisterErrorCodeMap is called from API implementations to register their\n// error code map. This should only be called from init functions.\nfunc RegisterErrorCodeMap(service string, m map[int32]string) {\n\terrorCodeMaps[service] = m\n}\n\ntype timeoutCodeKey struct {\n\tservice string\n\tcode    int32\n}\n\n// timeoutCodes is the set of service+code pairs that represent timeouts.\nvar timeoutCodes = make(map[timeoutCodeKey]bool)\n\nfunc RegisterTimeoutErrorCode(service string, code int32) {\n\ttimeoutCodes[timeoutCodeKey{service, code}] = true\n}\n\n// APIError is the type returned by appengine.Context's Call method\n// when an API call fails in an API-specific way. 
This may be, for instance,\n// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.\ntype APIError struct {\n\tService string\n\tDetail  string\n\tCode    int32 // API-specific error code\n}\n\nfunc (e *APIError) Error() string {\n\tif e.Code == 0 {\n\t\tif e.Detail == \"\" {\n\t\t\treturn \"APIError <empty>\"\n\t\t}\n\t\treturn e.Detail\n\t}\n\ts := fmt.Sprintf(\"API error %d\", e.Code)\n\tif m, ok := errorCodeMaps[e.Service]; ok {\n\t\ts += \" (\" + e.Service + \": \" + m[e.Code] + \")\"\n\t} else {\n\t\t// Shouldn't happen, but provide a bit more detail if it does.\n\t\ts = e.Service + \" \" + s\n\t}\n\tif e.Detail != \"\" {\n\t\ts += \": \" + e.Detail\n\t}\n\treturn s\n}\n\nfunc (e *APIError) IsTimeout() bool {\n\treturn timeoutCodes[timeoutCodeKey{e.Service, e.Code}]\n}\n\n// CallError is the type returned by appengine.Context's Call method when an\n// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.\ntype CallError struct {\n\tDetail string\n\tCode   int32\n\t// TODO: Remove this if we get a distinguishable error code.\n\tTimeout bool\n}\n\nfunc (e *CallError) Error() string {\n\tvar msg string\n\tswitch remotepb.RpcError_ErrorCode(e.Code) {\n\tcase remotepb.RpcError_UNKNOWN:\n\t\treturn e.Detail\n\tcase remotepb.RpcError_OVER_QUOTA:\n\t\tmsg = \"Over quota\"\n\tcase remotepb.RpcError_CAPABILITY_DISABLED:\n\t\tmsg = \"Capability disabled\"\n\tcase remotepb.RpcError_CANCELLED:\n\t\tmsg = \"Canceled\"\n\tdefault:\n\t\tmsg = fmt.Sprintf(\"Call error %d\", e.Code)\n\t}\n\ts := msg + \": \" + e.Detail\n\tif e.Timeout {\n\t\ts += \" (timeout)\"\n\t}\n\treturn s\n}\n\nfunc (e *CallError) IsTimeout() bool {\n\treturn e.Timeout\n}\n\nfunc Main() {\n\tinstallHealthChecker(http.DefaultServeMux)\n\n\tport := \"8080\"\n\tif s := os.Getenv(\"PORT\"); s != \"\" {\n\t\tport = s\n\t}\n\n\tif err := http.ListenAndServe(\":\"+port, http.HandlerFunc(handleHTTP)); err != nil {\n\t\tlog.Fatalf(\"http.ListenAndServe: %v\", 
err)\n\t}\n}\n\nfunc installHealthChecker(mux *http.ServeMux) {\n\t// If no health check handler has been installed by this point, add a trivial one.\n\tconst healthPath = \"/_ah/health\"\n\threq := &http.Request{\n\t\tMethod: \"GET\",\n\t\tURL: &url.URL{\n\t\t\tPath: healthPath,\n\t\t},\n\t}\n\tif _, pat := mux.Handler(hreq); pat != healthPath {\n\t\tmux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {\n\t\t\tio.WriteString(w, \"ok\")\n\t\t})\n\t}\n}\n\n// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.\n// The function should be prepared to be called on the same message more than once; it should only modify the\n// RPC request the first time.\nvar NamespaceMods = make(map[string]func(m proto.Message, namespace string))\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/log/log_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/log/log_service.proto\n// DO NOT EDIT!\n\n/*\nPackage log is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/log/log_service.proto\n\nIt has these top-level messages:\n\tLogServiceError\n\tUserAppLogLine\n\tUserAppLogGroup\n\tFlushRequest\n\tSetStatusRequest\n\tLogOffset\n\tLogLine\n\tRequestLog\n\tLogModuleVersion\n\tLogReadRequest\n\tLogReadResponse\n\tLogUsageRecord\n\tLogUsageRequest\n\tLogUsageResponse\n*/\npackage log\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype LogServiceError_ErrorCode int32\n\nconst (\n\tLogServiceError_OK              LogServiceError_ErrorCode = 0\n\tLogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1\n\tLogServiceError_STORAGE_ERROR   LogServiceError_ErrorCode = 2\n)\n\nvar LogServiceError_ErrorCode_name = map[int32]string{\n\t0: \"OK\",\n\t1: \"INVALID_REQUEST\",\n\t2: \"STORAGE_ERROR\",\n}\nvar LogServiceError_ErrorCode_value = map[string]int32{\n\t\"OK\":              0,\n\t\"INVALID_REQUEST\": 1,\n\t\"STORAGE_ERROR\":   2,\n}\n\nfunc (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {\n\tp := new(LogServiceError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x LogServiceError_ErrorCode) String() string {\n\treturn proto.EnumName(LogServiceError_ErrorCode_name, int32(x))\n}\nfunc (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, \"LogServiceError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = LogServiceError_ErrorCode(value)\n\treturn nil\n}\n\ntype LogServiceError struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *LogServiceError) Reset()  
       { *m = LogServiceError{} }\nfunc (m *LogServiceError) String() string { return proto.CompactTextString(m) }\nfunc (*LogServiceError) ProtoMessage()    {}\n\ntype UserAppLogLine struct {\n\tTimestampUsec    *int64  `protobuf:\"varint,1,req,name=timestamp_usec\" json:\"timestamp_usec,omitempty\"`\n\tLevel            *int64  `protobuf:\"varint,2,req,name=level\" json:\"level,omitempty\"`\n\tMessage          *string `protobuf:\"bytes,3,req,name=message\" json:\"message,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *UserAppLogLine) Reset()         { *m = UserAppLogLine{} }\nfunc (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }\nfunc (*UserAppLogLine) ProtoMessage()    {}\n\nfunc (m *UserAppLogLine) GetTimestampUsec() int64 {\n\tif m != nil && m.TimestampUsec != nil {\n\t\treturn *m.TimestampUsec\n\t}\n\treturn 0\n}\n\nfunc (m *UserAppLogLine) GetLevel() int64 {\n\tif m != nil && m.Level != nil {\n\t\treturn *m.Level\n\t}\n\treturn 0\n}\n\nfunc (m *UserAppLogLine) GetMessage() string {\n\tif m != nil && m.Message != nil {\n\t\treturn *m.Message\n\t}\n\treturn \"\"\n}\n\ntype UserAppLogGroup struct {\n\tLogLine          []*UserAppLogLine `protobuf:\"bytes,2,rep,name=log_line\" json:\"log_line,omitempty\"`\n\tXXX_unrecognized []byte            `json:\"-\"`\n}\n\nfunc (m *UserAppLogGroup) Reset()         { *m = UserAppLogGroup{} }\nfunc (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }\nfunc (*UserAppLogGroup) ProtoMessage()    {}\n\nfunc (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {\n\tif m != nil {\n\t\treturn m.LogLine\n\t}\n\treturn nil\n}\n\ntype FlushRequest struct {\n\tLogs             []byte `protobuf:\"bytes,1,opt,name=logs\" json:\"logs,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *FlushRequest) Reset()         { *m = FlushRequest{} }\nfunc (m *FlushRequest) String() string { return proto.CompactTextString(m) }\nfunc (*FlushRequest) ProtoMessage()    
{}\n\nfunc (m *FlushRequest) GetLogs() []byte {\n\tif m != nil {\n\t\treturn m.Logs\n\t}\n\treturn nil\n}\n\ntype SetStatusRequest struct {\n\tStatus           *string `protobuf:\"bytes,1,req,name=status\" json:\"status,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *SetStatusRequest) Reset()         { *m = SetStatusRequest{} }\nfunc (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }\nfunc (*SetStatusRequest) ProtoMessage()    {}\n\nfunc (m *SetStatusRequest) GetStatus() string {\n\tif m != nil && m.Status != nil {\n\t\treturn *m.Status\n\t}\n\treturn \"\"\n}\n\ntype LogOffset struct {\n\tRequestId        []byte `protobuf:\"bytes,1,opt,name=request_id\" json:\"request_id,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *LogOffset) Reset()         { *m = LogOffset{} }\nfunc (m *LogOffset) String() string { return proto.CompactTextString(m) }\nfunc (*LogOffset) ProtoMessage()    {}\n\nfunc (m *LogOffset) GetRequestId() []byte {\n\tif m != nil {\n\t\treturn m.RequestId\n\t}\n\treturn nil\n}\n\ntype LogLine struct {\n\tTime             *int64  `protobuf:\"varint,1,req,name=time\" json:\"time,omitempty\"`\n\tLevel            *int32  `protobuf:\"varint,2,req,name=level\" json:\"level,omitempty\"`\n\tLogMessage       *string `protobuf:\"bytes,3,req,name=log_message\" json:\"log_message,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *LogLine) Reset()         { *m = LogLine{} }\nfunc (m *LogLine) String() string { return proto.CompactTextString(m) }\nfunc (*LogLine) ProtoMessage()    {}\n\nfunc (m *LogLine) GetTime() int64 {\n\tif m != nil && m.Time != nil {\n\t\treturn *m.Time\n\t}\n\treturn 0\n}\n\nfunc (m *LogLine) GetLevel() int32 {\n\tif m != nil && m.Level != nil {\n\t\treturn *m.Level\n\t}\n\treturn 0\n}\n\nfunc (m *LogLine) GetLogMessage() string {\n\tif m != nil && m.LogMessage != nil {\n\t\treturn *m.LogMessage\n\t}\n\treturn \"\"\n}\n\ntype RequestLog struct {\n\tAppId            
       *string    `protobuf:\"bytes,1,req,name=app_id\" json:\"app_id,omitempty\"`\n\tModuleId                *string    `protobuf:\"bytes,37,opt,name=module_id,def=default\" json:\"module_id,omitempty\"`\n\tVersionId               *string    `protobuf:\"bytes,2,req,name=version_id\" json:\"version_id,omitempty\"`\n\tRequestId               []byte     `protobuf:\"bytes,3,req,name=request_id\" json:\"request_id,omitempty\"`\n\tOffset                  *LogOffset `protobuf:\"bytes,35,opt,name=offset\" json:\"offset,omitempty\"`\n\tIp                      *string    `protobuf:\"bytes,4,req,name=ip\" json:\"ip,omitempty\"`\n\tNickname                *string    `protobuf:\"bytes,5,opt,name=nickname\" json:\"nickname,omitempty\"`\n\tStartTime               *int64     `protobuf:\"varint,6,req,name=start_time\" json:\"start_time,omitempty\"`\n\tEndTime                 *int64     `protobuf:\"varint,7,req,name=end_time\" json:\"end_time,omitempty\"`\n\tLatency                 *int64     `protobuf:\"varint,8,req,name=latency\" json:\"latency,omitempty\"`\n\tMcycles                 *int64     `protobuf:\"varint,9,req,name=mcycles\" json:\"mcycles,omitempty\"`\n\tMethod                  *string    `protobuf:\"bytes,10,req,name=method\" json:\"method,omitempty\"`\n\tResource                *string    `protobuf:\"bytes,11,req,name=resource\" json:\"resource,omitempty\"`\n\tHttpVersion             *string    `protobuf:\"bytes,12,req,name=http_version\" json:\"http_version,omitempty\"`\n\tStatus                  *int32     `protobuf:\"varint,13,req,name=status\" json:\"status,omitempty\"`\n\tResponseSize            *int64     `protobuf:\"varint,14,req,name=response_size\" json:\"response_size,omitempty\"`\n\tReferrer                *string    `protobuf:\"bytes,15,opt,name=referrer\" json:\"referrer,omitempty\"`\n\tUserAgent               *string    `protobuf:\"bytes,16,opt,name=user_agent\" json:\"user_agent,omitempty\"`\n\tUrlMapEntry             *string    
`protobuf:\"bytes,17,req,name=url_map_entry\" json:\"url_map_entry,omitempty\"`\n\tCombined                *string    `protobuf:\"bytes,18,req,name=combined\" json:\"combined,omitempty\"`\n\tApiMcycles              *int64     `protobuf:\"varint,19,opt,name=api_mcycles\" json:\"api_mcycles,omitempty\"`\n\tHost                    *string    `protobuf:\"bytes,20,opt,name=host\" json:\"host,omitempty\"`\n\tCost                    *float64   `protobuf:\"fixed64,21,opt,name=cost\" json:\"cost,omitempty\"`\n\tTaskQueueName           *string    `protobuf:\"bytes,22,opt,name=task_queue_name\" json:\"task_queue_name,omitempty\"`\n\tTaskName                *string    `protobuf:\"bytes,23,opt,name=task_name\" json:\"task_name,omitempty\"`\n\tWasLoadingRequest       *bool      `protobuf:\"varint,24,opt,name=was_loading_request\" json:\"was_loading_request,omitempty\"`\n\tPendingTime             *int64     `protobuf:\"varint,25,opt,name=pending_time\" json:\"pending_time,omitempty\"`\n\tReplicaIndex            *int32     `protobuf:\"varint,26,opt,name=replica_index,def=-1\" json:\"replica_index,omitempty\"`\n\tFinished                *bool      `protobuf:\"varint,27,opt,name=finished,def=1\" json:\"finished,omitempty\"`\n\tCloneKey                []byte     `protobuf:\"bytes,28,opt,name=clone_key\" json:\"clone_key,omitempty\"`\n\tLine                    []*LogLine `protobuf:\"bytes,29,rep,name=line\" json:\"line,omitempty\"`\n\tLinesIncomplete         *bool      `protobuf:\"varint,36,opt,name=lines_incomplete\" json:\"lines_incomplete,omitempty\"`\n\tAppEngineRelease        []byte     `protobuf:\"bytes,38,opt,name=app_engine_release\" json:\"app_engine_release,omitempty\"`\n\tExitReason              *int32     `protobuf:\"varint,30,opt,name=exit_reason\" json:\"exit_reason,omitempty\"`\n\tWasThrottledForTime     *bool      `protobuf:\"varint,31,opt,name=was_throttled_for_time\" json:\"was_throttled_for_time,omitempty\"`\n\tWasThrottledForRequests *bool      
`protobuf:\"varint,32,opt,name=was_throttled_for_requests\" json:\"was_throttled_for_requests,omitempty\"`\n\tThrottledTime           *int64     `protobuf:\"varint,33,opt,name=throttled_time\" json:\"throttled_time,omitempty\"`\n\tServerName              []byte     `protobuf:\"bytes,34,opt,name=server_name\" json:\"server_name,omitempty\"`\n\tXXX_unrecognized        []byte     `json:\"-\"`\n}\n\nfunc (m *RequestLog) Reset()         { *m = RequestLog{} }\nfunc (m *RequestLog) String() string { return proto.CompactTextString(m) }\nfunc (*RequestLog) ProtoMessage()    {}\n\nconst Default_RequestLog_ModuleId string = \"default\"\nconst Default_RequestLog_ReplicaIndex int32 = -1\nconst Default_RequestLog_Finished bool = true\n\nfunc (m *RequestLog) GetAppId() string {\n\tif m != nil && m.AppId != nil {\n\t\treturn *m.AppId\n\t}\n\treturn \"\"\n}\n\nfunc (m *RequestLog) GetModuleId() string {\n\tif m != nil && m.ModuleId != nil {\n\t\treturn *m.ModuleId\n\t}\n\treturn Default_RequestLog_ModuleId\n}\n\nfunc (m *RequestLog) GetVersionId() string {\n\tif m != nil && m.VersionId != nil {\n\t\treturn *m.VersionId\n\t}\n\treturn \"\"\n}\n\nfunc (m *RequestLog) GetRequestId() []byte {\n\tif m != nil {\n\t\treturn m.RequestId\n\t}\n\treturn nil\n}\n\nfunc (m *RequestLog) GetOffset() *LogOffset {\n\tif m != nil {\n\t\treturn m.Offset\n\t}\n\treturn nil\n}\n\nfunc (m *RequestLog) GetIp() string {\n\tif m != nil && m.Ip != nil {\n\t\treturn *m.Ip\n\t}\n\treturn \"\"\n}\n\nfunc (m *RequestLog) GetNickname() string {\n\tif m != nil && m.Nickname != nil {\n\t\treturn *m.Nickname\n\t}\n\treturn \"\"\n}\n\nfunc (m *RequestLog) GetStartTime() int64 {\n\tif m != nil && m.StartTime != nil {\n\t\treturn *m.StartTime\n\t}\n\treturn 0\n}\n\nfunc (m *RequestLog) GetEndTime() int64 {\n\tif m != nil && m.EndTime != nil {\n\t\treturn *m.EndTime\n\t}\n\treturn 0\n}\n\nfunc (m *RequestLog) GetLatency() int64 {\n\tif m != nil && m.Latency != nil {\n\t\treturn *m.Latency\n\t}\n\treturn 0\n}\n\nfunc 
(m *RequestLog) GetMcycles() int64 {\n\tif m != nil && m.Mcycles != nil {\n\t\treturn *m.Mcycles\n\t}\n\treturn 0\n}\n\nfunc (m *RequestLog) GetMethod() string {\n\tif m != nil && m.Method != nil {\n\t\treturn *m.Method\n\t}\n\treturn \"\"\n}\n\nfunc (m *RequestLog) GetResource() string {\n\tif m != nil && m.Resource != nil {\n\t\treturn *m.Resource\n\t}\n\treturn \"\"\n}\n\nfunc (m *RequestLog) GetHttpVersion() string {\n\tif m != nil && m.HttpVersion != nil {\n\t\treturn *m.HttpVersion\n\t}\n\treturn \"\"\n}\n\nfunc (m *RequestLog) GetStatus() int32 {\n\tif m != nil && m.Status != nil {\n\t\treturn *m.Status\n\t}\n\treturn 0\n}\n\nfunc (m *RequestLog) GetResponseSize() int64 {\n\tif m != nil && m.ResponseSize != nil {\n\t\treturn *m.ResponseSize\n\t}\n\treturn 0\n}\n\nfunc (m *RequestLog) GetReferrer() string {\n\tif m != nil && m.Referrer != nil {\n\t\treturn *m.Referrer\n\t}\n\treturn \"\"\n}\n\nfunc (m *RequestLog) GetUserAgent() string {\n\tif m != nil && m.UserAgent != nil {\n\t\treturn *m.UserAgent\n\t}\n\treturn \"\"\n}\n\nfunc (m *RequestLog) GetUrlMapEntry() string {\n\tif m != nil && m.UrlMapEntry != nil {\n\t\treturn *m.UrlMapEntry\n\t}\n\treturn \"\"\n}\n\nfunc (m *RequestLog) GetCombined() string {\n\tif m != nil && m.Combined != nil {\n\t\treturn *m.Combined\n\t}\n\treturn \"\"\n}\n\nfunc (m *RequestLog) GetApiMcycles() int64 {\n\tif m != nil && m.ApiMcycles != nil {\n\t\treturn *m.ApiMcycles\n\t}\n\treturn 0\n}\n\nfunc (m *RequestLog) GetHost() string {\n\tif m != nil && m.Host != nil {\n\t\treturn *m.Host\n\t}\n\treturn \"\"\n}\n\nfunc (m *RequestLog) GetCost() float64 {\n\tif m != nil && m.Cost != nil {\n\t\treturn *m.Cost\n\t}\n\treturn 0\n}\n\nfunc (m *RequestLog) GetTaskQueueName() string {\n\tif m != nil && m.TaskQueueName != nil {\n\t\treturn *m.TaskQueueName\n\t}\n\treturn \"\"\n}\n\nfunc (m *RequestLog) GetTaskName() string {\n\tif m != nil && m.TaskName != nil {\n\t\treturn *m.TaskName\n\t}\n\treturn \"\"\n}\n\nfunc (m *RequestLog) 
GetWasLoadingRequest() bool {\n\tif m != nil && m.WasLoadingRequest != nil {\n\t\treturn *m.WasLoadingRequest\n\t}\n\treturn false\n}\n\nfunc (m *RequestLog) GetPendingTime() int64 {\n\tif m != nil && m.PendingTime != nil {\n\t\treturn *m.PendingTime\n\t}\n\treturn 0\n}\n\nfunc (m *RequestLog) GetReplicaIndex() int32 {\n\tif m != nil && m.ReplicaIndex != nil {\n\t\treturn *m.ReplicaIndex\n\t}\n\treturn Default_RequestLog_ReplicaIndex\n}\n\nfunc (m *RequestLog) GetFinished() bool {\n\tif m != nil && m.Finished != nil {\n\t\treturn *m.Finished\n\t}\n\treturn Default_RequestLog_Finished\n}\n\nfunc (m *RequestLog) GetCloneKey() []byte {\n\tif m != nil {\n\t\treturn m.CloneKey\n\t}\n\treturn nil\n}\n\nfunc (m *RequestLog) GetLine() []*LogLine {\n\tif m != nil {\n\t\treturn m.Line\n\t}\n\treturn nil\n}\n\nfunc (m *RequestLog) GetLinesIncomplete() bool {\n\tif m != nil && m.LinesIncomplete != nil {\n\t\treturn *m.LinesIncomplete\n\t}\n\treturn false\n}\n\nfunc (m *RequestLog) GetAppEngineRelease() []byte {\n\tif m != nil {\n\t\treturn m.AppEngineRelease\n\t}\n\treturn nil\n}\n\nfunc (m *RequestLog) GetExitReason() int32 {\n\tif m != nil && m.ExitReason != nil {\n\t\treturn *m.ExitReason\n\t}\n\treturn 0\n}\n\nfunc (m *RequestLog) GetWasThrottledForTime() bool {\n\tif m != nil && m.WasThrottledForTime != nil {\n\t\treturn *m.WasThrottledForTime\n\t}\n\treturn false\n}\n\nfunc (m *RequestLog) GetWasThrottledForRequests() bool {\n\tif m != nil && m.WasThrottledForRequests != nil {\n\t\treturn *m.WasThrottledForRequests\n\t}\n\treturn false\n}\n\nfunc (m *RequestLog) GetThrottledTime() int64 {\n\tif m != nil && m.ThrottledTime != nil {\n\t\treturn *m.ThrottledTime\n\t}\n\treturn 0\n}\n\nfunc (m *RequestLog) GetServerName() []byte {\n\tif m != nil {\n\t\treturn m.ServerName\n\t}\n\treturn nil\n}\n\ntype LogModuleVersion struct {\n\tModuleId         *string `protobuf:\"bytes,1,opt,name=module_id,def=default\" json:\"module_id,omitempty\"`\n\tVersionId        *string 
`protobuf:\"bytes,2,opt,name=version_id\" json:\"version_id,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *LogModuleVersion) Reset()         { *m = LogModuleVersion{} }\nfunc (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }\nfunc (*LogModuleVersion) ProtoMessage()    {}\n\nconst Default_LogModuleVersion_ModuleId string = \"default\"\n\nfunc (m *LogModuleVersion) GetModuleId() string {\n\tif m != nil && m.ModuleId != nil {\n\t\treturn *m.ModuleId\n\t}\n\treturn Default_LogModuleVersion_ModuleId\n}\n\nfunc (m *LogModuleVersion) GetVersionId() string {\n\tif m != nil && m.VersionId != nil {\n\t\treturn *m.VersionId\n\t}\n\treturn \"\"\n}\n\ntype LogReadRequest struct {\n\tAppId             *string             `protobuf:\"bytes,1,req,name=app_id\" json:\"app_id,omitempty\"`\n\tVersionId         []string            `protobuf:\"bytes,2,rep,name=version_id\" json:\"version_id,omitempty\"`\n\tModuleVersion     []*LogModuleVersion `protobuf:\"bytes,19,rep,name=module_version\" json:\"module_version,omitempty\"`\n\tStartTime         *int64              `protobuf:\"varint,3,opt,name=start_time\" json:\"start_time,omitempty\"`\n\tEndTime           *int64              `protobuf:\"varint,4,opt,name=end_time\" json:\"end_time,omitempty\"`\n\tOffset            *LogOffset          `protobuf:\"bytes,5,opt,name=offset\" json:\"offset,omitempty\"`\n\tRequestId         [][]byte            `protobuf:\"bytes,6,rep,name=request_id\" json:\"request_id,omitempty\"`\n\tMinimumLogLevel   *int32              `protobuf:\"varint,7,opt,name=minimum_log_level\" json:\"minimum_log_level,omitempty\"`\n\tIncludeIncomplete *bool               `protobuf:\"varint,8,opt,name=include_incomplete\" json:\"include_incomplete,omitempty\"`\n\tCount             *int64              `protobuf:\"varint,9,opt,name=count\" json:\"count,omitempty\"`\n\tCombinedLogRegex  *string             `protobuf:\"bytes,14,opt,name=combined_log_regex\" 
json:\"combined_log_regex,omitempty\"`\n\tHostRegex         *string             `protobuf:\"bytes,15,opt,name=host_regex\" json:\"host_regex,omitempty\"`\n\tReplicaIndex      *int32              `protobuf:\"varint,16,opt,name=replica_index\" json:\"replica_index,omitempty\"`\n\tIncludeAppLogs    *bool               `protobuf:\"varint,10,opt,name=include_app_logs\" json:\"include_app_logs,omitempty\"`\n\tAppLogsPerRequest *int32              `protobuf:\"varint,17,opt,name=app_logs_per_request\" json:\"app_logs_per_request,omitempty\"`\n\tIncludeHost       *bool               `protobuf:\"varint,11,opt,name=include_host\" json:\"include_host,omitempty\"`\n\tIncludeAll        *bool               `protobuf:\"varint,12,opt,name=include_all\" json:\"include_all,omitempty\"`\n\tCacheIterator     *bool               `protobuf:\"varint,13,opt,name=cache_iterator\" json:\"cache_iterator,omitempty\"`\n\tNumShards         *int32              `protobuf:\"varint,18,opt,name=num_shards\" json:\"num_shards,omitempty\"`\n\tXXX_unrecognized  []byte              `json:\"-\"`\n}\n\nfunc (m *LogReadRequest) Reset()         { *m = LogReadRequest{} }\nfunc (m *LogReadRequest) String() string { return proto.CompactTextString(m) }\nfunc (*LogReadRequest) ProtoMessage()    {}\n\nfunc (m *LogReadRequest) GetAppId() string {\n\tif m != nil && m.AppId != nil {\n\t\treturn *m.AppId\n\t}\n\treturn \"\"\n}\n\nfunc (m *LogReadRequest) GetVersionId() []string {\n\tif m != nil {\n\t\treturn m.VersionId\n\t}\n\treturn nil\n}\n\nfunc (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {\n\tif m != nil {\n\t\treturn m.ModuleVersion\n\t}\n\treturn nil\n}\n\nfunc (m *LogReadRequest) GetStartTime() int64 {\n\tif m != nil && m.StartTime != nil {\n\t\treturn *m.StartTime\n\t}\n\treturn 0\n}\n\nfunc (m *LogReadRequest) GetEndTime() int64 {\n\tif m != nil && m.EndTime != nil {\n\t\treturn *m.EndTime\n\t}\n\treturn 0\n}\n\nfunc (m *LogReadRequest) GetOffset() *LogOffset {\n\tif m != nil {\n\t\treturn 
m.Offset\n\t}\n\treturn nil\n}\n\nfunc (m *LogReadRequest) GetRequestId() [][]byte {\n\tif m != nil {\n\t\treturn m.RequestId\n\t}\n\treturn nil\n}\n\nfunc (m *LogReadRequest) GetMinimumLogLevel() int32 {\n\tif m != nil && m.MinimumLogLevel != nil {\n\t\treturn *m.MinimumLogLevel\n\t}\n\treturn 0\n}\n\nfunc (m *LogReadRequest) GetIncludeIncomplete() bool {\n\tif m != nil && m.IncludeIncomplete != nil {\n\t\treturn *m.IncludeIncomplete\n\t}\n\treturn false\n}\n\nfunc (m *LogReadRequest) GetCount() int64 {\n\tif m != nil && m.Count != nil {\n\t\treturn *m.Count\n\t}\n\treturn 0\n}\n\nfunc (m *LogReadRequest) GetCombinedLogRegex() string {\n\tif m != nil && m.CombinedLogRegex != nil {\n\t\treturn *m.CombinedLogRegex\n\t}\n\treturn \"\"\n}\n\nfunc (m *LogReadRequest) GetHostRegex() string {\n\tif m != nil && m.HostRegex != nil {\n\t\treturn *m.HostRegex\n\t}\n\treturn \"\"\n}\n\nfunc (m *LogReadRequest) GetReplicaIndex() int32 {\n\tif m != nil && m.ReplicaIndex != nil {\n\t\treturn *m.ReplicaIndex\n\t}\n\treturn 0\n}\n\nfunc (m *LogReadRequest) GetIncludeAppLogs() bool {\n\tif m != nil && m.IncludeAppLogs != nil {\n\t\treturn *m.IncludeAppLogs\n\t}\n\treturn false\n}\n\nfunc (m *LogReadRequest) GetAppLogsPerRequest() int32 {\n\tif m != nil && m.AppLogsPerRequest != nil {\n\t\treturn *m.AppLogsPerRequest\n\t}\n\treturn 0\n}\n\nfunc (m *LogReadRequest) GetIncludeHost() bool {\n\tif m != nil && m.IncludeHost != nil {\n\t\treturn *m.IncludeHost\n\t}\n\treturn false\n}\n\nfunc (m *LogReadRequest) GetIncludeAll() bool {\n\tif m != nil && m.IncludeAll != nil {\n\t\treturn *m.IncludeAll\n\t}\n\treturn false\n}\n\nfunc (m *LogReadRequest) GetCacheIterator() bool {\n\tif m != nil && m.CacheIterator != nil {\n\t\treturn *m.CacheIterator\n\t}\n\treturn false\n}\n\nfunc (m *LogReadRequest) GetNumShards() int32 {\n\tif m != nil && m.NumShards != nil {\n\t\treturn *m.NumShards\n\t}\n\treturn 0\n}\n\ntype LogReadResponse struct {\n\tLog              []*RequestLog 
`protobuf:\"bytes,1,rep,name=log\" json:\"log,omitempty\"`\n\tOffset           *LogOffset    `protobuf:\"bytes,2,opt,name=offset\" json:\"offset,omitempty\"`\n\tLastEndTime      *int64        `protobuf:\"varint,3,opt,name=last_end_time\" json:\"last_end_time,omitempty\"`\n\tXXX_unrecognized []byte        `json:\"-\"`\n}\n\nfunc (m *LogReadResponse) Reset()         { *m = LogReadResponse{} }\nfunc (m *LogReadResponse) String() string { return proto.CompactTextString(m) }\nfunc (*LogReadResponse) ProtoMessage()    {}\n\nfunc (m *LogReadResponse) GetLog() []*RequestLog {\n\tif m != nil {\n\t\treturn m.Log\n\t}\n\treturn nil\n}\n\nfunc (m *LogReadResponse) GetOffset() *LogOffset {\n\tif m != nil {\n\t\treturn m.Offset\n\t}\n\treturn nil\n}\n\nfunc (m *LogReadResponse) GetLastEndTime() int64 {\n\tif m != nil && m.LastEndTime != nil {\n\t\treturn *m.LastEndTime\n\t}\n\treturn 0\n}\n\ntype LogUsageRecord struct {\n\tVersionId        *string `protobuf:\"bytes,1,opt,name=version_id\" json:\"version_id,omitempty\"`\n\tStartTime        *int32  `protobuf:\"varint,2,opt,name=start_time\" json:\"start_time,omitempty\"`\n\tEndTime          *int32  `protobuf:\"varint,3,opt,name=end_time\" json:\"end_time,omitempty\"`\n\tCount            *int64  `protobuf:\"varint,4,opt,name=count\" json:\"count,omitempty\"`\n\tTotalSize        *int64  `protobuf:\"varint,5,opt,name=total_size\" json:\"total_size,omitempty\"`\n\tRecords          *int32  `protobuf:\"varint,6,opt,name=records\" json:\"records,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *LogUsageRecord) Reset()         { *m = LogUsageRecord{} }\nfunc (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }\nfunc (*LogUsageRecord) ProtoMessage()    {}\n\nfunc (m *LogUsageRecord) GetVersionId() string {\n\tif m != nil && m.VersionId != nil {\n\t\treturn *m.VersionId\n\t}\n\treturn \"\"\n}\n\nfunc (m *LogUsageRecord) GetStartTime() int32 {\n\tif m != nil && m.StartTime != nil {\n\t\treturn 
*m.StartTime\n\t}\n\treturn 0\n}\n\nfunc (m *LogUsageRecord) GetEndTime() int32 {\n\tif m != nil && m.EndTime != nil {\n\t\treturn *m.EndTime\n\t}\n\treturn 0\n}\n\nfunc (m *LogUsageRecord) GetCount() int64 {\n\tif m != nil && m.Count != nil {\n\t\treturn *m.Count\n\t}\n\treturn 0\n}\n\nfunc (m *LogUsageRecord) GetTotalSize() int64 {\n\tif m != nil && m.TotalSize != nil {\n\t\treturn *m.TotalSize\n\t}\n\treturn 0\n}\n\nfunc (m *LogUsageRecord) GetRecords() int32 {\n\tif m != nil && m.Records != nil {\n\t\treturn *m.Records\n\t}\n\treturn 0\n}\n\ntype LogUsageRequest struct {\n\tAppId            *string  `protobuf:\"bytes,1,req,name=app_id\" json:\"app_id,omitempty\"`\n\tVersionId        []string `protobuf:\"bytes,2,rep,name=version_id\" json:\"version_id,omitempty\"`\n\tStartTime        *int32   `protobuf:\"varint,3,opt,name=start_time\" json:\"start_time,omitempty\"`\n\tEndTime          *int32   `protobuf:\"varint,4,opt,name=end_time\" json:\"end_time,omitempty\"`\n\tResolutionHours  *uint32  `protobuf:\"varint,5,opt,name=resolution_hours,def=1\" json:\"resolution_hours,omitempty\"`\n\tCombineVersions  *bool    `protobuf:\"varint,6,opt,name=combine_versions\" json:\"combine_versions,omitempty\"`\n\tUsageVersion     *int32   `protobuf:\"varint,7,opt,name=usage_version\" json:\"usage_version,omitempty\"`\n\tVersionsOnly     *bool    `protobuf:\"varint,8,opt,name=versions_only\" json:\"versions_only,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *LogUsageRequest) Reset()         { *m = LogUsageRequest{} }\nfunc (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }\nfunc (*LogUsageRequest) ProtoMessage()    {}\n\nconst Default_LogUsageRequest_ResolutionHours uint32 = 1\n\nfunc (m *LogUsageRequest) GetAppId() string {\n\tif m != nil && m.AppId != nil {\n\t\treturn *m.AppId\n\t}\n\treturn \"\"\n}\n\nfunc (m *LogUsageRequest) GetVersionId() []string {\n\tif m != nil {\n\t\treturn m.VersionId\n\t}\n\treturn nil\n}\n\nfunc (m 
*LogUsageRequest) GetStartTime() int32 {\n\tif m != nil && m.StartTime != nil {\n\t\treturn *m.StartTime\n\t}\n\treturn 0\n}\n\nfunc (m *LogUsageRequest) GetEndTime() int32 {\n\tif m != nil && m.EndTime != nil {\n\t\treturn *m.EndTime\n\t}\n\treturn 0\n}\n\nfunc (m *LogUsageRequest) GetResolutionHours() uint32 {\n\tif m != nil && m.ResolutionHours != nil {\n\t\treturn *m.ResolutionHours\n\t}\n\treturn Default_LogUsageRequest_ResolutionHours\n}\n\nfunc (m *LogUsageRequest) GetCombineVersions() bool {\n\tif m != nil && m.CombineVersions != nil {\n\t\treturn *m.CombineVersions\n\t}\n\treturn false\n}\n\nfunc (m *LogUsageRequest) GetUsageVersion() int32 {\n\tif m != nil && m.UsageVersion != nil {\n\t\treturn *m.UsageVersion\n\t}\n\treturn 0\n}\n\nfunc (m *LogUsageRequest) GetVersionsOnly() bool {\n\tif m != nil && m.VersionsOnly != nil {\n\t\treturn *m.VersionsOnly\n\t}\n\treturn false\n}\n\ntype LogUsageResponse struct {\n\tUsage            []*LogUsageRecord `protobuf:\"bytes,1,rep,name=usage\" json:\"usage,omitempty\"`\n\tSummary          *LogUsageRecord   `protobuf:\"bytes,2,opt,name=summary\" json:\"summary,omitempty\"`\n\tXXX_unrecognized []byte            `json:\"-\"`\n}\n\nfunc (m *LogUsageResponse) Reset()         { *m = LogUsageResponse{} }\nfunc (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }\nfunc (*LogUsageResponse) ProtoMessage()    {}\n\nfunc (m *LogUsageResponse) GetUsage() []*LogUsageRecord {\n\tif m != nil {\n\t\treturn m.Usage\n\t}\n\treturn nil\n}\n\nfunc (m *LogUsageResponse) GetSummary() *LogUsageRecord {\n\tif m != nil {\n\t\treturn m.Summary\n\t}\n\treturn nil\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/log/log_service.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"log\";\n\npackage appengine;\n\nmessage LogServiceError {\n  enum ErrorCode {\n    OK  = 0;\n    INVALID_REQUEST = 1;\n    STORAGE_ERROR = 2;\n  }\n}\n\nmessage UserAppLogLine {\n  required int64 timestamp_usec = 1;\n  required int64 level = 2;\n  required string message = 3;\n}\n\nmessage UserAppLogGroup {\n  repeated UserAppLogLine log_line = 2;\n}\n\nmessage FlushRequest {\n  optional bytes logs = 1;\n}\n\nmessage SetStatusRequest {\n  required string status = 1;\n}\n\n\nmessage LogOffset {\n  optional bytes request_id = 1;\n}\n\nmessage LogLine {\n  required int64 time = 1;\n  required int32 level = 2;\n  required string log_message = 3;\n}\n\nmessage RequestLog {\n  required string app_id = 1;\n  optional string module_id = 37 [default=\"default\"];\n  required string version_id = 2;\n  required bytes request_id = 3;\n  optional LogOffset offset = 35;\n  required string ip = 4;\n  optional string nickname = 5;\n  required int64 start_time = 6;\n  required int64 end_time = 7;\n  required int64 latency = 8;\n  required int64 mcycles = 9;\n  required string method = 10;\n  required string resource = 11;\n  required string http_version = 12;\n  required int32 status = 13;\n  required int64 response_size = 14;\n  optional string referrer = 15;\n  optional string user_agent = 16;\n  required string url_map_entry = 17;\n  required string combined = 18;\n  optional int64 api_mcycles = 19;\n  optional string host = 20;\n  optional double cost = 21;\n\n  optional string task_queue_name = 22;\n  optional string task_name = 23;\n\n  optional bool was_loading_request = 24;\n  optional int64 pending_time = 25;\n  optional int32 replica_index = 26 [default = -1];\n  optional bool finished = 27 [default = true];\n  optional bytes clone_key = 28;\n\n  repeated LogLine line = 29;\n\n  optional bool lines_incomplete = 36;\n  optional bytes app_engine_release = 38;\n\n  optional int32 exit_reason = 30;\n  optional bool 
was_throttled_for_time = 31;\n  optional bool was_throttled_for_requests = 32;\n  optional int64 throttled_time = 33;\n\n  optional bytes server_name = 34;\n}\n\nmessage LogModuleVersion {\n  optional string module_id = 1 [default=\"default\"];\n  optional string version_id = 2;\n}\n\nmessage LogReadRequest {\n  required string app_id = 1;\n  repeated string version_id = 2;\n  repeated LogModuleVersion module_version = 19;\n\n  optional int64 start_time = 3;\n  optional int64 end_time = 4;\n  optional LogOffset offset = 5;\n  repeated bytes request_id = 6;\n\n  optional int32 minimum_log_level = 7;\n  optional bool include_incomplete = 8;\n  optional int64 count = 9;\n\n  optional string combined_log_regex = 14;\n  optional string host_regex = 15;\n  optional int32 replica_index = 16;\n\n  optional bool include_app_logs = 10;\n  optional int32 app_logs_per_request = 17;\n  optional bool include_host = 11;\n  optional bool include_all = 12;\n  optional bool cache_iterator = 13;\n  optional int32 num_shards = 18;\n}\n\nmessage LogReadResponse {\n  repeated RequestLog log = 1;\n  optional LogOffset offset = 2;\n  optional int64 last_end_time = 3;\n}\n\nmessage LogUsageRecord {\n  optional string version_id = 1;\n  optional int32 start_time = 2;\n  optional int32 end_time = 3;\n  optional int64 count = 4;\n  optional int64 total_size = 5;\n  optional int32 records = 6;\n}\n\nmessage LogUsageRequest {\n  required string app_id = 1;\n  repeated string version_id = 2;\n  optional int32 start_time = 3;\n  optional int32 end_time = 4;\n  optional uint32 resolution_hours = 5 [default = 1];\n  optional bool combine_versions = 6;\n  optional int32 usage_version = 7;\n  optional bool versions_only = 8;\n}\n\nmessage LogUsageResponse {\n  repeated LogUsageRecord usage = 1;\n  optional LogUsageRecord summary = 2;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/mail/mail_service.proto\n// DO NOT EDIT!\n\n/*\nPackage mail is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/mail/mail_service.proto\n\nIt has these top-level messages:\n\tMailServiceError\n\tMailAttachment\n\tMailHeader\n\tMailMessage\n*/\npackage mail\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype MailServiceError_ErrorCode int32\n\nconst (\n\tMailServiceError_OK                      MailServiceError_ErrorCode = 0\n\tMailServiceError_INTERNAL_ERROR          MailServiceError_ErrorCode = 1\n\tMailServiceError_BAD_REQUEST             MailServiceError_ErrorCode = 2\n\tMailServiceError_UNAUTHORIZED_SENDER     MailServiceError_ErrorCode = 3\n\tMailServiceError_INVALID_ATTACHMENT_TYPE MailServiceError_ErrorCode = 4\n\tMailServiceError_INVALID_HEADER_NAME     MailServiceError_ErrorCode = 5\n\tMailServiceError_INVALID_CONTENT_ID      MailServiceError_ErrorCode = 6\n)\n\nvar MailServiceError_ErrorCode_name = map[int32]string{\n\t0: \"OK\",\n\t1: \"INTERNAL_ERROR\",\n\t2: \"BAD_REQUEST\",\n\t3: \"UNAUTHORIZED_SENDER\",\n\t4: \"INVALID_ATTACHMENT_TYPE\",\n\t5: \"INVALID_HEADER_NAME\",\n\t6: \"INVALID_CONTENT_ID\",\n}\nvar MailServiceError_ErrorCode_value = map[string]int32{\n\t\"OK\":                      0,\n\t\"INTERNAL_ERROR\":          1,\n\t\"BAD_REQUEST\":             2,\n\t\"UNAUTHORIZED_SENDER\":     3,\n\t\"INVALID_ATTACHMENT_TYPE\": 4,\n\t\"INVALID_HEADER_NAME\":     5,\n\t\"INVALID_CONTENT_ID\":      6,\n}\n\nfunc (x MailServiceError_ErrorCode) Enum() *MailServiceError_ErrorCode {\n\tp := new(MailServiceError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x MailServiceError_ErrorCode) String() string {\n\treturn 
proto.EnumName(MailServiceError_ErrorCode_name, int32(x))\n}\nfunc (x *MailServiceError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(MailServiceError_ErrorCode_value, data, \"MailServiceError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = MailServiceError_ErrorCode(value)\n\treturn nil\n}\n\ntype MailServiceError struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *MailServiceError) Reset()         { *m = MailServiceError{} }\nfunc (m *MailServiceError) String() string { return proto.CompactTextString(m) }\nfunc (*MailServiceError) ProtoMessage()    {}\n\ntype MailAttachment struct {\n\tFileName         *string `protobuf:\"bytes,1,req,name=FileName\" json:\"FileName,omitempty\"`\n\tData             []byte  `protobuf:\"bytes,2,req,name=Data\" json:\"Data,omitempty\"`\n\tContentID        *string `protobuf:\"bytes,3,opt,name=ContentID\" json:\"ContentID,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *MailAttachment) Reset()         { *m = MailAttachment{} }\nfunc (m *MailAttachment) String() string { return proto.CompactTextString(m) }\nfunc (*MailAttachment) ProtoMessage()    {}\n\nfunc (m *MailAttachment) GetFileName() string {\n\tif m != nil && m.FileName != nil {\n\t\treturn *m.FileName\n\t}\n\treturn \"\"\n}\n\nfunc (m *MailAttachment) GetData() []byte {\n\tif m != nil {\n\t\treturn m.Data\n\t}\n\treturn nil\n}\n\nfunc (m *MailAttachment) GetContentID() string {\n\tif m != nil && m.ContentID != nil {\n\t\treturn *m.ContentID\n\t}\n\treturn \"\"\n}\n\ntype MailHeader struct {\n\tName             *string `protobuf:\"bytes,1,req,name=name\" json:\"name,omitempty\"`\n\tValue            *string `protobuf:\"bytes,2,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *MailHeader) Reset()         { *m = MailHeader{} }\nfunc (m *MailHeader) String() string { return proto.CompactTextString(m) }\nfunc (*MailHeader) ProtoMessage()    
{}\n\nfunc (m *MailHeader) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *MailHeader) GetValue() string {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn \"\"\n}\n\ntype MailMessage struct {\n\tSender           *string           `protobuf:\"bytes,1,req,name=Sender\" json:\"Sender,omitempty\"`\n\tReplyTo          *string           `protobuf:\"bytes,2,opt,name=ReplyTo\" json:\"ReplyTo,omitempty\"`\n\tTo               []string          `protobuf:\"bytes,3,rep,name=To\" json:\"To,omitempty\"`\n\tCc               []string          `protobuf:\"bytes,4,rep,name=Cc\" json:\"Cc,omitempty\"`\n\tBcc              []string          `protobuf:\"bytes,5,rep,name=Bcc\" json:\"Bcc,omitempty\"`\n\tSubject          *string           `protobuf:\"bytes,6,req,name=Subject\" json:\"Subject,omitempty\"`\n\tTextBody         *string           `protobuf:\"bytes,7,opt,name=TextBody\" json:\"TextBody,omitempty\"`\n\tHtmlBody         *string           `protobuf:\"bytes,8,opt,name=HtmlBody\" json:\"HtmlBody,omitempty\"`\n\tAttachment       []*MailAttachment `protobuf:\"bytes,9,rep,name=Attachment\" json:\"Attachment,omitempty\"`\n\tHeader           []*MailHeader     `protobuf:\"bytes,10,rep,name=Header\" json:\"Header,omitempty\"`\n\tXXX_unrecognized []byte            `json:\"-\"`\n}\n\nfunc (m *MailMessage) Reset()         { *m = MailMessage{} }\nfunc (m *MailMessage) String() string { return proto.CompactTextString(m) }\nfunc (*MailMessage) ProtoMessage()    {}\n\nfunc (m *MailMessage) GetSender() string {\n\tif m != nil && m.Sender != nil {\n\t\treturn *m.Sender\n\t}\n\treturn \"\"\n}\n\nfunc (m *MailMessage) GetReplyTo() string {\n\tif m != nil && m.ReplyTo != nil {\n\t\treturn *m.ReplyTo\n\t}\n\treturn \"\"\n}\n\nfunc (m *MailMessage) GetTo() []string {\n\tif m != nil {\n\t\treturn m.To\n\t}\n\treturn nil\n}\n\nfunc (m *MailMessage) GetCc() []string {\n\tif m != nil {\n\t\treturn m.Cc\n\t}\n\treturn 
nil\n}\n\nfunc (m *MailMessage) GetBcc() []string {\n\tif m != nil {\n\t\treturn m.Bcc\n\t}\n\treturn nil\n}\n\nfunc (m *MailMessage) GetSubject() string {\n\tif m != nil && m.Subject != nil {\n\t\treturn *m.Subject\n\t}\n\treturn \"\"\n}\n\nfunc (m *MailMessage) GetTextBody() string {\n\tif m != nil && m.TextBody != nil {\n\t\treturn *m.TextBody\n\t}\n\treturn \"\"\n}\n\nfunc (m *MailMessage) GetHtmlBody() string {\n\tif m != nil && m.HtmlBody != nil {\n\t\treturn *m.HtmlBody\n\t}\n\treturn \"\"\n}\n\nfunc (m *MailMessage) GetAttachment() []*MailAttachment {\n\tif m != nil {\n\t\treturn m.Attachment\n\t}\n\treturn nil\n}\n\nfunc (m *MailMessage) GetHeader() []*MailHeader {\n\tif m != nil {\n\t\treturn m.Header\n\t}\n\treturn nil\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/mail/mail_service.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"mail\";\n\npackage appengine;\n\nmessage MailServiceError {\n  enum ErrorCode {\n    OK = 0;\n    INTERNAL_ERROR = 1;\n    BAD_REQUEST = 2;\n    UNAUTHORIZED_SENDER = 3;\n    INVALID_ATTACHMENT_TYPE = 4;\n    INVALID_HEADER_NAME = 5;\n    INVALID_CONTENT_ID = 6;\n  }\n}\n\nmessage MailAttachment {\n  required string FileName = 1;\n  required bytes Data = 2;\n  optional string ContentID = 3;\n}\n\nmessage MailHeader {\n  required string name = 1;\n  required string value = 2;\n}\n\nmessage MailMessage {\n  required string Sender = 1;\n  optional string ReplyTo = 2;\n\n  repeated string To = 3;\n  repeated string Cc = 4;\n  repeated string Bcc = 5;\n\n  required string Subject = 6;\n\n  optional string TextBody = 7;\n  optional string HtmlBody = 8;\n\n  repeated MailAttachment Attachment = 9;\n\n  repeated MailHeader Header = 10;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/memcache/memcache_service.proto\n// DO NOT EDIT!\n\n/*\nPackage memcache is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/memcache/memcache_service.proto\n\nIt has these top-level messages:\n\tMemcacheServiceError\n\tAppOverride\n\tMemcacheGetRequest\n\tMemcacheGetResponse\n\tMemcacheSetRequest\n\tMemcacheSetResponse\n\tMemcacheDeleteRequest\n\tMemcacheDeleteResponse\n\tMemcacheIncrementRequest\n\tMemcacheIncrementResponse\n\tMemcacheBatchIncrementRequest\n\tMemcacheBatchIncrementResponse\n\tMemcacheFlushRequest\n\tMemcacheFlushResponse\n\tMemcacheStatsRequest\n\tMergedNamespaceStats\n\tMemcacheStatsResponse\n\tMemcacheGrabTailRequest\n\tMemcacheGrabTailResponse\n*/\npackage memcache\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype MemcacheServiceError_ErrorCode int32\n\nconst (\n\tMemcacheServiceError_OK                MemcacheServiceError_ErrorCode = 0\n\tMemcacheServiceError_UNSPECIFIED_ERROR MemcacheServiceError_ErrorCode = 1\n\tMemcacheServiceError_NAMESPACE_NOT_SET MemcacheServiceError_ErrorCode = 2\n\tMemcacheServiceError_PERMISSION_DENIED MemcacheServiceError_ErrorCode = 3\n\tMemcacheServiceError_INVALID_VALUE     MemcacheServiceError_ErrorCode = 6\n)\n\nvar MemcacheServiceError_ErrorCode_name = map[int32]string{\n\t0: \"OK\",\n\t1: \"UNSPECIFIED_ERROR\",\n\t2: \"NAMESPACE_NOT_SET\",\n\t3: \"PERMISSION_DENIED\",\n\t6: \"INVALID_VALUE\",\n}\nvar MemcacheServiceError_ErrorCode_value = map[string]int32{\n\t\"OK\":                0,\n\t\"UNSPECIFIED_ERROR\": 1,\n\t\"NAMESPACE_NOT_SET\": 2,\n\t\"PERMISSION_DENIED\": 3,\n\t\"INVALID_VALUE\":     6,\n}\n\nfunc (x MemcacheServiceError_ErrorCode) Enum() 
*MemcacheServiceError_ErrorCode {\n\tp := new(MemcacheServiceError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x MemcacheServiceError_ErrorCode) String() string {\n\treturn proto.EnumName(MemcacheServiceError_ErrorCode_name, int32(x))\n}\nfunc (x *MemcacheServiceError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(MemcacheServiceError_ErrorCode_value, data, \"MemcacheServiceError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = MemcacheServiceError_ErrorCode(value)\n\treturn nil\n}\n\ntype MemcacheSetRequest_SetPolicy int32\n\nconst (\n\tMemcacheSetRequest_SET     MemcacheSetRequest_SetPolicy = 1\n\tMemcacheSetRequest_ADD     MemcacheSetRequest_SetPolicy = 2\n\tMemcacheSetRequest_REPLACE MemcacheSetRequest_SetPolicy = 3\n\tMemcacheSetRequest_CAS     MemcacheSetRequest_SetPolicy = 4\n)\n\nvar MemcacheSetRequest_SetPolicy_name = map[int32]string{\n\t1: \"SET\",\n\t2: \"ADD\",\n\t3: \"REPLACE\",\n\t4: \"CAS\",\n}\nvar MemcacheSetRequest_SetPolicy_value = map[string]int32{\n\t\"SET\":     1,\n\t\"ADD\":     2,\n\t\"REPLACE\": 3,\n\t\"CAS\":     4,\n}\n\nfunc (x MemcacheSetRequest_SetPolicy) Enum() *MemcacheSetRequest_SetPolicy {\n\tp := new(MemcacheSetRequest_SetPolicy)\n\t*p = x\n\treturn p\n}\nfunc (x MemcacheSetRequest_SetPolicy) String() string {\n\treturn proto.EnumName(MemcacheSetRequest_SetPolicy_name, int32(x))\n}\nfunc (x *MemcacheSetRequest_SetPolicy) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(MemcacheSetRequest_SetPolicy_value, data, \"MemcacheSetRequest_SetPolicy\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = MemcacheSetRequest_SetPolicy(value)\n\treturn nil\n}\n\ntype MemcacheSetResponse_SetStatusCode int32\n\nconst (\n\tMemcacheSetResponse_STORED     MemcacheSetResponse_SetStatusCode = 1\n\tMemcacheSetResponse_NOT_STORED MemcacheSetResponse_SetStatusCode = 2\n\tMemcacheSetResponse_ERROR      MemcacheSetResponse_SetStatusCode = 3\n\tMemcacheSetResponse_EXISTS     
MemcacheSetResponse_SetStatusCode = 4\n)\n\nvar MemcacheSetResponse_SetStatusCode_name = map[int32]string{\n\t1: \"STORED\",\n\t2: \"NOT_STORED\",\n\t3: \"ERROR\",\n\t4: \"EXISTS\",\n}\nvar MemcacheSetResponse_SetStatusCode_value = map[string]int32{\n\t\"STORED\":     1,\n\t\"NOT_STORED\": 2,\n\t\"ERROR\":      3,\n\t\"EXISTS\":     4,\n}\n\nfunc (x MemcacheSetResponse_SetStatusCode) Enum() *MemcacheSetResponse_SetStatusCode {\n\tp := new(MemcacheSetResponse_SetStatusCode)\n\t*p = x\n\treturn p\n}\nfunc (x MemcacheSetResponse_SetStatusCode) String() string {\n\treturn proto.EnumName(MemcacheSetResponse_SetStatusCode_name, int32(x))\n}\nfunc (x *MemcacheSetResponse_SetStatusCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(MemcacheSetResponse_SetStatusCode_value, data, \"MemcacheSetResponse_SetStatusCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = MemcacheSetResponse_SetStatusCode(value)\n\treturn nil\n}\n\ntype MemcacheDeleteResponse_DeleteStatusCode int32\n\nconst (\n\tMemcacheDeleteResponse_DELETED   MemcacheDeleteResponse_DeleteStatusCode = 1\n\tMemcacheDeleteResponse_NOT_FOUND MemcacheDeleteResponse_DeleteStatusCode = 2\n)\n\nvar MemcacheDeleteResponse_DeleteStatusCode_name = map[int32]string{\n\t1: \"DELETED\",\n\t2: \"NOT_FOUND\",\n}\nvar MemcacheDeleteResponse_DeleteStatusCode_value = map[string]int32{\n\t\"DELETED\":   1,\n\t\"NOT_FOUND\": 2,\n}\n\nfunc (x MemcacheDeleteResponse_DeleteStatusCode) Enum() *MemcacheDeleteResponse_DeleteStatusCode {\n\tp := new(MemcacheDeleteResponse_DeleteStatusCode)\n\t*p = x\n\treturn p\n}\nfunc (x MemcacheDeleteResponse_DeleteStatusCode) String() string {\n\treturn proto.EnumName(MemcacheDeleteResponse_DeleteStatusCode_name, int32(x))\n}\nfunc (x *MemcacheDeleteResponse_DeleteStatusCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(MemcacheDeleteResponse_DeleteStatusCode_value, data, \"MemcacheDeleteResponse_DeleteStatusCode\")\n\tif err != nil 
{\n\t\treturn err\n\t}\n\t*x = MemcacheDeleteResponse_DeleteStatusCode(value)\n\treturn nil\n}\n\ntype MemcacheIncrementRequest_Direction int32\n\nconst (\n\tMemcacheIncrementRequest_INCREMENT MemcacheIncrementRequest_Direction = 1\n\tMemcacheIncrementRequest_DECREMENT MemcacheIncrementRequest_Direction = 2\n)\n\nvar MemcacheIncrementRequest_Direction_name = map[int32]string{\n\t1: \"INCREMENT\",\n\t2: \"DECREMENT\",\n}\nvar MemcacheIncrementRequest_Direction_value = map[string]int32{\n\t\"INCREMENT\": 1,\n\t\"DECREMENT\": 2,\n}\n\nfunc (x MemcacheIncrementRequest_Direction) Enum() *MemcacheIncrementRequest_Direction {\n\tp := new(MemcacheIncrementRequest_Direction)\n\t*p = x\n\treturn p\n}\nfunc (x MemcacheIncrementRequest_Direction) String() string {\n\treturn proto.EnumName(MemcacheIncrementRequest_Direction_name, int32(x))\n}\nfunc (x *MemcacheIncrementRequest_Direction) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(MemcacheIncrementRequest_Direction_value, data, \"MemcacheIncrementRequest_Direction\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = MemcacheIncrementRequest_Direction(value)\n\treturn nil\n}\n\ntype MemcacheIncrementResponse_IncrementStatusCode int32\n\nconst (\n\tMemcacheIncrementResponse_OK          MemcacheIncrementResponse_IncrementStatusCode = 1\n\tMemcacheIncrementResponse_NOT_CHANGED MemcacheIncrementResponse_IncrementStatusCode = 2\n\tMemcacheIncrementResponse_ERROR       MemcacheIncrementResponse_IncrementStatusCode = 3\n)\n\nvar MemcacheIncrementResponse_IncrementStatusCode_name = map[int32]string{\n\t1: \"OK\",\n\t2: \"NOT_CHANGED\",\n\t3: \"ERROR\",\n}\nvar MemcacheIncrementResponse_IncrementStatusCode_value = map[string]int32{\n\t\"OK\":          1,\n\t\"NOT_CHANGED\": 2,\n\t\"ERROR\":       3,\n}\n\nfunc (x MemcacheIncrementResponse_IncrementStatusCode) Enum() *MemcacheIncrementResponse_IncrementStatusCode {\n\tp := new(MemcacheIncrementResponse_IncrementStatusCode)\n\t*p = x\n\treturn p\n}\nfunc (x 
MemcacheIncrementResponse_IncrementStatusCode) String() string {\n\treturn proto.EnumName(MemcacheIncrementResponse_IncrementStatusCode_name, int32(x))\n}\nfunc (x *MemcacheIncrementResponse_IncrementStatusCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(MemcacheIncrementResponse_IncrementStatusCode_value, data, \"MemcacheIncrementResponse_IncrementStatusCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = MemcacheIncrementResponse_IncrementStatusCode(value)\n\treturn nil\n}\n\ntype MemcacheServiceError struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *MemcacheServiceError) Reset()         { *m = MemcacheServiceError{} }\nfunc (m *MemcacheServiceError) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheServiceError) ProtoMessage()    {}\n\ntype AppOverride struct {\n\tAppId                    *string `protobuf:\"bytes,1,req,name=app_id\" json:\"app_id,omitempty\"`\n\tNumMemcachegBackends     *int32  `protobuf:\"varint,2,opt,name=num_memcacheg_backends\" json:\"num_memcacheg_backends,omitempty\"`\n\tIgnoreShardlock          *bool   `protobuf:\"varint,3,opt,name=ignore_shardlock\" json:\"ignore_shardlock,omitempty\"`\n\tMemcachePoolHint         *string `protobuf:\"bytes,4,opt,name=memcache_pool_hint\" json:\"memcache_pool_hint,omitempty\"`\n\tMemcacheShardingStrategy []byte  `protobuf:\"bytes,5,opt,name=memcache_sharding_strategy\" json:\"memcache_sharding_strategy,omitempty\"`\n\tXXX_unrecognized         []byte  `json:\"-\"`\n}\n\nfunc (m *AppOverride) Reset()         { *m = AppOverride{} }\nfunc (m *AppOverride) String() string { return proto.CompactTextString(m) }\nfunc (*AppOverride) ProtoMessage()    {}\n\nfunc (m *AppOverride) GetAppId() string {\n\tif m != nil && m.AppId != nil {\n\t\treturn *m.AppId\n\t}\n\treturn \"\"\n}\n\nfunc (m *AppOverride) GetNumMemcachegBackends() int32 {\n\tif m != nil && m.NumMemcachegBackends != nil {\n\t\treturn *m.NumMemcachegBackends\n\t}\n\treturn 0\n}\n\nfunc 
(m *AppOverride) GetIgnoreShardlock() bool {\n\tif m != nil && m.IgnoreShardlock != nil {\n\t\treturn *m.IgnoreShardlock\n\t}\n\treturn false\n}\n\nfunc (m *AppOverride) GetMemcachePoolHint() string {\n\tif m != nil && m.MemcachePoolHint != nil {\n\t\treturn *m.MemcachePoolHint\n\t}\n\treturn \"\"\n}\n\nfunc (m *AppOverride) GetMemcacheShardingStrategy() []byte {\n\tif m != nil {\n\t\treturn m.MemcacheShardingStrategy\n\t}\n\treturn nil\n}\n\ntype MemcacheGetRequest struct {\n\tKey              [][]byte     `protobuf:\"bytes,1,rep,name=key\" json:\"key,omitempty\"`\n\tNameSpace        *string      `protobuf:\"bytes,2,opt,name=name_space,def=\" json:\"name_space,omitempty\"`\n\tForCas           *bool        `protobuf:\"varint,4,opt,name=for_cas\" json:\"for_cas,omitempty\"`\n\tOverride         *AppOverride `protobuf:\"bytes,5,opt,name=override\" json:\"override,omitempty\"`\n\tXXX_unrecognized []byte       `json:\"-\"`\n}\n\nfunc (m *MemcacheGetRequest) Reset()         { *m = MemcacheGetRequest{} }\nfunc (m *MemcacheGetRequest) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheGetRequest) ProtoMessage()    {}\n\nfunc (m *MemcacheGetRequest) GetKey() [][]byte {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *MemcacheGetRequest) GetNameSpace() string {\n\tif m != nil && m.NameSpace != nil {\n\t\treturn *m.NameSpace\n\t}\n\treturn \"\"\n}\n\nfunc (m *MemcacheGetRequest) GetForCas() bool {\n\tif m != nil && m.ForCas != nil {\n\t\treturn *m.ForCas\n\t}\n\treturn false\n}\n\nfunc (m *MemcacheGetRequest) GetOverride() *AppOverride {\n\tif m != nil {\n\t\treturn m.Override\n\t}\n\treturn nil\n}\n\ntype MemcacheGetResponse struct {\n\tItem             []*MemcacheGetResponse_Item `protobuf:\"group,1,rep,name=Item\" json:\"item,omitempty\"`\n\tXXX_unrecognized []byte                      `json:\"-\"`\n}\n\nfunc (m *MemcacheGetResponse) Reset()         { *m = MemcacheGetResponse{} }\nfunc (m *MemcacheGetResponse) String() string { return 
proto.CompactTextString(m) }\nfunc (*MemcacheGetResponse) ProtoMessage()    {}\n\nfunc (m *MemcacheGetResponse) GetItem() []*MemcacheGetResponse_Item {\n\tif m != nil {\n\t\treturn m.Item\n\t}\n\treturn nil\n}\n\ntype MemcacheGetResponse_Item struct {\n\tKey              []byte  `protobuf:\"bytes,2,req,name=key\" json:\"key,omitempty\"`\n\tValue            []byte  `protobuf:\"bytes,3,req,name=value\" json:\"value,omitempty\"`\n\tFlags            *uint32 `protobuf:\"fixed32,4,opt,name=flags\" json:\"flags,omitempty\"`\n\tCasId            *uint64 `protobuf:\"fixed64,5,opt,name=cas_id\" json:\"cas_id,omitempty\"`\n\tExpiresInSeconds *int32  `protobuf:\"varint,6,opt,name=expires_in_seconds\" json:\"expires_in_seconds,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *MemcacheGetResponse_Item) Reset()         { *m = MemcacheGetResponse_Item{} }\nfunc (m *MemcacheGetResponse_Item) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheGetResponse_Item) ProtoMessage()    {}\n\nfunc (m *MemcacheGetResponse_Item) GetKey() []byte {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *MemcacheGetResponse_Item) GetValue() []byte {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\nfunc (m *MemcacheGetResponse_Item) GetFlags() uint32 {\n\tif m != nil && m.Flags != nil {\n\t\treturn *m.Flags\n\t}\n\treturn 0\n}\n\nfunc (m *MemcacheGetResponse_Item) GetCasId() uint64 {\n\tif m != nil && m.CasId != nil {\n\t\treturn *m.CasId\n\t}\n\treturn 0\n}\n\nfunc (m *MemcacheGetResponse_Item) GetExpiresInSeconds() int32 {\n\tif m != nil && m.ExpiresInSeconds != nil {\n\t\treturn *m.ExpiresInSeconds\n\t}\n\treturn 0\n}\n\ntype MemcacheSetRequest struct {\n\tItem             []*MemcacheSetRequest_Item `protobuf:\"group,1,rep,name=Item\" json:\"item,omitempty\"`\n\tNameSpace        *string                    `protobuf:\"bytes,7,opt,name=name_space,def=\" json:\"name_space,omitempty\"`\n\tOverride         *AppOverride               
`protobuf:\"bytes,10,opt,name=override\" json:\"override,omitempty\"`\n\tXXX_unrecognized []byte                     `json:\"-\"`\n}\n\nfunc (m *MemcacheSetRequest) Reset()         { *m = MemcacheSetRequest{} }\nfunc (m *MemcacheSetRequest) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheSetRequest) ProtoMessage()    {}\n\nfunc (m *MemcacheSetRequest) GetItem() []*MemcacheSetRequest_Item {\n\tif m != nil {\n\t\treturn m.Item\n\t}\n\treturn nil\n}\n\nfunc (m *MemcacheSetRequest) GetNameSpace() string {\n\tif m != nil && m.NameSpace != nil {\n\t\treturn *m.NameSpace\n\t}\n\treturn \"\"\n}\n\nfunc (m *MemcacheSetRequest) GetOverride() *AppOverride {\n\tif m != nil {\n\t\treturn m.Override\n\t}\n\treturn nil\n}\n\ntype MemcacheSetRequest_Item struct {\n\tKey              []byte                        `protobuf:\"bytes,2,req,name=key\" json:\"key,omitempty\"`\n\tValue            []byte                        `protobuf:\"bytes,3,req,name=value\" json:\"value,omitempty\"`\n\tFlags            *uint32                       `protobuf:\"fixed32,4,opt,name=flags\" json:\"flags,omitempty\"`\n\tSetPolicy        *MemcacheSetRequest_SetPolicy `protobuf:\"varint,5,opt,name=set_policy,enum=appengine.MemcacheSetRequest_SetPolicy,def=1\" json:\"set_policy,omitempty\"`\n\tExpirationTime   *uint32                       `protobuf:\"fixed32,6,opt,name=expiration_time,def=0\" json:\"expiration_time,omitempty\"`\n\tCasId            *uint64                       `protobuf:\"fixed64,8,opt,name=cas_id\" json:\"cas_id,omitempty\"`\n\tForCas           *bool                         `protobuf:\"varint,9,opt,name=for_cas\" json:\"for_cas,omitempty\"`\n\tXXX_unrecognized []byte                        `json:\"-\"`\n}\n\nfunc (m *MemcacheSetRequest_Item) Reset()         { *m = MemcacheSetRequest_Item{} }\nfunc (m *MemcacheSetRequest_Item) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheSetRequest_Item) ProtoMessage()    {}\n\nconst 
Default_MemcacheSetRequest_Item_SetPolicy MemcacheSetRequest_SetPolicy = MemcacheSetRequest_SET\nconst Default_MemcacheSetRequest_Item_ExpirationTime uint32 = 0\n\nfunc (m *MemcacheSetRequest_Item) GetKey() []byte {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *MemcacheSetRequest_Item) GetValue() []byte {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\nfunc (m *MemcacheSetRequest_Item) GetFlags() uint32 {\n\tif m != nil && m.Flags != nil {\n\t\treturn *m.Flags\n\t}\n\treturn 0\n}\n\nfunc (m *MemcacheSetRequest_Item) GetSetPolicy() MemcacheSetRequest_SetPolicy {\n\tif m != nil && m.SetPolicy != nil {\n\t\treturn *m.SetPolicy\n\t}\n\treturn Default_MemcacheSetRequest_Item_SetPolicy\n}\n\nfunc (m *MemcacheSetRequest_Item) GetExpirationTime() uint32 {\n\tif m != nil && m.ExpirationTime != nil {\n\t\treturn *m.ExpirationTime\n\t}\n\treturn Default_MemcacheSetRequest_Item_ExpirationTime\n}\n\nfunc (m *MemcacheSetRequest_Item) GetCasId() uint64 {\n\tif m != nil && m.CasId != nil {\n\t\treturn *m.CasId\n\t}\n\treturn 0\n}\n\nfunc (m *MemcacheSetRequest_Item) GetForCas() bool {\n\tif m != nil && m.ForCas != nil {\n\t\treturn *m.ForCas\n\t}\n\treturn false\n}\n\ntype MemcacheSetResponse struct {\n\tSetStatus        []MemcacheSetResponse_SetStatusCode `protobuf:\"varint,1,rep,name=set_status,enum=appengine.MemcacheSetResponse_SetStatusCode\" json:\"set_status,omitempty\"`\n\tXXX_unrecognized []byte                              `json:\"-\"`\n}\n\nfunc (m *MemcacheSetResponse) Reset()         { *m = MemcacheSetResponse{} }\nfunc (m *MemcacheSetResponse) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheSetResponse) ProtoMessage()    {}\n\nfunc (m *MemcacheSetResponse) GetSetStatus() []MemcacheSetResponse_SetStatusCode {\n\tif m != nil {\n\t\treturn m.SetStatus\n\t}\n\treturn nil\n}\n\ntype MemcacheDeleteRequest struct {\n\tItem             []*MemcacheDeleteRequest_Item `protobuf:\"group,1,rep,name=Item\" 
json:\"item,omitempty\"`\n\tNameSpace        *string                       `protobuf:\"bytes,4,opt,name=name_space,def=\" json:\"name_space,omitempty\"`\n\tOverride         *AppOverride                  `protobuf:\"bytes,5,opt,name=override\" json:\"override,omitempty\"`\n\tXXX_unrecognized []byte                        `json:\"-\"`\n}\n\nfunc (m *MemcacheDeleteRequest) Reset()         { *m = MemcacheDeleteRequest{} }\nfunc (m *MemcacheDeleteRequest) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheDeleteRequest) ProtoMessage()    {}\n\nfunc (m *MemcacheDeleteRequest) GetItem() []*MemcacheDeleteRequest_Item {\n\tif m != nil {\n\t\treturn m.Item\n\t}\n\treturn nil\n}\n\nfunc (m *MemcacheDeleteRequest) GetNameSpace() string {\n\tif m != nil && m.NameSpace != nil {\n\t\treturn *m.NameSpace\n\t}\n\treturn \"\"\n}\n\nfunc (m *MemcacheDeleteRequest) GetOverride() *AppOverride {\n\tif m != nil {\n\t\treturn m.Override\n\t}\n\treturn nil\n}\n\ntype MemcacheDeleteRequest_Item struct {\n\tKey              []byte  `protobuf:\"bytes,2,req,name=key\" json:\"key,omitempty\"`\n\tDeleteTime       *uint32 `protobuf:\"fixed32,3,opt,name=delete_time,def=0\" json:\"delete_time,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *MemcacheDeleteRequest_Item) Reset()         { *m = MemcacheDeleteRequest_Item{} }\nfunc (m *MemcacheDeleteRequest_Item) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheDeleteRequest_Item) ProtoMessage()    {}\n\nconst Default_MemcacheDeleteRequest_Item_DeleteTime uint32 = 0\n\nfunc (m *MemcacheDeleteRequest_Item) GetKey() []byte {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *MemcacheDeleteRequest_Item) GetDeleteTime() uint32 {\n\tif m != nil && m.DeleteTime != nil {\n\t\treturn *m.DeleteTime\n\t}\n\treturn Default_MemcacheDeleteRequest_Item_DeleteTime\n}\n\ntype MemcacheDeleteResponse struct {\n\tDeleteStatus     []MemcacheDeleteResponse_DeleteStatusCode 
`protobuf:\"varint,1,rep,name=delete_status,enum=appengine.MemcacheDeleteResponse_DeleteStatusCode\" json:\"delete_status,omitempty\"`\n\tXXX_unrecognized []byte                                    `json:\"-\"`\n}\n\nfunc (m *MemcacheDeleteResponse) Reset()         { *m = MemcacheDeleteResponse{} }\nfunc (m *MemcacheDeleteResponse) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheDeleteResponse) ProtoMessage()    {}\n\nfunc (m *MemcacheDeleteResponse) GetDeleteStatus() []MemcacheDeleteResponse_DeleteStatusCode {\n\tif m != nil {\n\t\treturn m.DeleteStatus\n\t}\n\treturn nil\n}\n\ntype MemcacheIncrementRequest struct {\n\tKey              []byte                              `protobuf:\"bytes,1,req,name=key\" json:\"key,omitempty\"`\n\tNameSpace        *string                             `protobuf:\"bytes,4,opt,name=name_space,def=\" json:\"name_space,omitempty\"`\n\tDelta            *uint64                             `protobuf:\"varint,2,opt,name=delta,def=1\" json:\"delta,omitempty\"`\n\tDirection        *MemcacheIncrementRequest_Direction `protobuf:\"varint,3,opt,name=direction,enum=appengine.MemcacheIncrementRequest_Direction,def=1\" json:\"direction,omitempty\"`\n\tInitialValue     *uint64                             `protobuf:\"varint,5,opt,name=initial_value\" json:\"initial_value,omitempty\"`\n\tInitialFlags     *uint32                             `protobuf:\"fixed32,6,opt,name=initial_flags\" json:\"initial_flags,omitempty\"`\n\tOverride         *AppOverride                        `protobuf:\"bytes,7,opt,name=override\" json:\"override,omitempty\"`\n\tXXX_unrecognized []byte                              `json:\"-\"`\n}\n\nfunc (m *MemcacheIncrementRequest) Reset()         { *m = MemcacheIncrementRequest{} }\nfunc (m *MemcacheIncrementRequest) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheIncrementRequest) ProtoMessage()    {}\n\nconst Default_MemcacheIncrementRequest_Delta uint64 = 1\nconst 
Default_MemcacheIncrementRequest_Direction MemcacheIncrementRequest_Direction = MemcacheIncrementRequest_INCREMENT\n\nfunc (m *MemcacheIncrementRequest) GetKey() []byte {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *MemcacheIncrementRequest) GetNameSpace() string {\n\tif m != nil && m.NameSpace != nil {\n\t\treturn *m.NameSpace\n\t}\n\treturn \"\"\n}\n\nfunc (m *MemcacheIncrementRequest) GetDelta() uint64 {\n\tif m != nil && m.Delta != nil {\n\t\treturn *m.Delta\n\t}\n\treturn Default_MemcacheIncrementRequest_Delta\n}\n\nfunc (m *MemcacheIncrementRequest) GetDirection() MemcacheIncrementRequest_Direction {\n\tif m != nil && m.Direction != nil {\n\t\treturn *m.Direction\n\t}\n\treturn Default_MemcacheIncrementRequest_Direction\n}\n\nfunc (m *MemcacheIncrementRequest) GetInitialValue() uint64 {\n\tif m != nil && m.InitialValue != nil {\n\t\treturn *m.InitialValue\n\t}\n\treturn 0\n}\n\nfunc (m *MemcacheIncrementRequest) GetInitialFlags() uint32 {\n\tif m != nil && m.InitialFlags != nil {\n\t\treturn *m.InitialFlags\n\t}\n\treturn 0\n}\n\nfunc (m *MemcacheIncrementRequest) GetOverride() *AppOverride {\n\tif m != nil {\n\t\treturn m.Override\n\t}\n\treturn nil\n}\n\ntype MemcacheIncrementResponse struct {\n\tNewValue         *uint64                                        `protobuf:\"varint,1,opt,name=new_value\" json:\"new_value,omitempty\"`\n\tIncrementStatus  *MemcacheIncrementResponse_IncrementStatusCode `protobuf:\"varint,2,opt,name=increment_status,enum=appengine.MemcacheIncrementResponse_IncrementStatusCode\" json:\"increment_status,omitempty\"`\n\tXXX_unrecognized []byte                                         `json:\"-\"`\n}\n\nfunc (m *MemcacheIncrementResponse) Reset()         { *m = MemcacheIncrementResponse{} }\nfunc (m *MemcacheIncrementResponse) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheIncrementResponse) ProtoMessage()    {}\n\nfunc (m *MemcacheIncrementResponse) GetNewValue() uint64 {\n\tif m != nil && 
m.NewValue != nil {\n\t\treturn *m.NewValue\n\t}\n\treturn 0\n}\n\nfunc (m *MemcacheIncrementResponse) GetIncrementStatus() MemcacheIncrementResponse_IncrementStatusCode {\n\tif m != nil && m.IncrementStatus != nil {\n\t\treturn *m.IncrementStatus\n\t}\n\treturn MemcacheIncrementResponse_OK\n}\n\ntype MemcacheBatchIncrementRequest struct {\n\tNameSpace        *string                     `protobuf:\"bytes,1,opt,name=name_space,def=\" json:\"name_space,omitempty\"`\n\tItem             []*MemcacheIncrementRequest `protobuf:\"bytes,2,rep,name=item\" json:\"item,omitempty\"`\n\tOverride         *AppOverride                `protobuf:\"bytes,3,opt,name=override\" json:\"override,omitempty\"`\n\tXXX_unrecognized []byte                      `json:\"-\"`\n}\n\nfunc (m *MemcacheBatchIncrementRequest) Reset()         { *m = MemcacheBatchIncrementRequest{} }\nfunc (m *MemcacheBatchIncrementRequest) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheBatchIncrementRequest) ProtoMessage()    {}\n\nfunc (m *MemcacheBatchIncrementRequest) GetNameSpace() string {\n\tif m != nil && m.NameSpace != nil {\n\t\treturn *m.NameSpace\n\t}\n\treturn \"\"\n}\n\nfunc (m *MemcacheBatchIncrementRequest) GetItem() []*MemcacheIncrementRequest {\n\tif m != nil {\n\t\treturn m.Item\n\t}\n\treturn nil\n}\n\nfunc (m *MemcacheBatchIncrementRequest) GetOverride() *AppOverride {\n\tif m != nil {\n\t\treturn m.Override\n\t}\n\treturn nil\n}\n\ntype MemcacheBatchIncrementResponse struct {\n\tItem             []*MemcacheIncrementResponse `protobuf:\"bytes,1,rep,name=item\" json:\"item,omitempty\"`\n\tXXX_unrecognized []byte                       `json:\"-\"`\n}\n\nfunc (m *MemcacheBatchIncrementResponse) Reset()         { *m = MemcacheBatchIncrementResponse{} }\nfunc (m *MemcacheBatchIncrementResponse) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheBatchIncrementResponse) ProtoMessage()    {}\n\nfunc (m *MemcacheBatchIncrementResponse) GetItem() 
[]*MemcacheIncrementResponse {\n\tif m != nil {\n\t\treturn m.Item\n\t}\n\treturn nil\n}\n\ntype MemcacheFlushRequest struct {\n\tOverride         *AppOverride `protobuf:\"bytes,1,opt,name=override\" json:\"override,omitempty\"`\n\tXXX_unrecognized []byte       `json:\"-\"`\n}\n\nfunc (m *MemcacheFlushRequest) Reset()         { *m = MemcacheFlushRequest{} }\nfunc (m *MemcacheFlushRequest) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheFlushRequest) ProtoMessage()    {}\n\nfunc (m *MemcacheFlushRequest) GetOverride() *AppOverride {\n\tif m != nil {\n\t\treturn m.Override\n\t}\n\treturn nil\n}\n\ntype MemcacheFlushResponse struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *MemcacheFlushResponse) Reset()         { *m = MemcacheFlushResponse{} }\nfunc (m *MemcacheFlushResponse) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheFlushResponse) ProtoMessage()    {}\n\ntype MemcacheStatsRequest struct {\n\tOverride         *AppOverride `protobuf:\"bytes,1,opt,name=override\" json:\"override,omitempty\"`\n\tXXX_unrecognized []byte       `json:\"-\"`\n}\n\nfunc (m *MemcacheStatsRequest) Reset()         { *m = MemcacheStatsRequest{} }\nfunc (m *MemcacheStatsRequest) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheStatsRequest) ProtoMessage()    {}\n\nfunc (m *MemcacheStatsRequest) GetOverride() *AppOverride {\n\tif m != nil {\n\t\treturn m.Override\n\t}\n\treturn nil\n}\n\ntype MergedNamespaceStats struct {\n\tHits             *uint64 `protobuf:\"varint,1,req,name=hits\" json:\"hits,omitempty\"`\n\tMisses           *uint64 `protobuf:\"varint,2,req,name=misses\" json:\"misses,omitempty\"`\n\tByteHits         *uint64 `protobuf:\"varint,3,req,name=byte_hits\" json:\"byte_hits,omitempty\"`\n\tItems            *uint64 `protobuf:\"varint,4,req,name=items\" json:\"items,omitempty\"`\n\tBytes            *uint64 `protobuf:\"varint,5,req,name=bytes\" json:\"bytes,omitempty\"`\n\tOldestItemAge    *uint32 
`protobuf:\"fixed32,6,req,name=oldest_item_age\" json:\"oldest_item_age,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *MergedNamespaceStats) Reset()         { *m = MergedNamespaceStats{} }\nfunc (m *MergedNamespaceStats) String() string { return proto.CompactTextString(m) }\nfunc (*MergedNamespaceStats) ProtoMessage()    {}\n\nfunc (m *MergedNamespaceStats) GetHits() uint64 {\n\tif m != nil && m.Hits != nil {\n\t\treturn *m.Hits\n\t}\n\treturn 0\n}\n\nfunc (m *MergedNamespaceStats) GetMisses() uint64 {\n\tif m != nil && m.Misses != nil {\n\t\treturn *m.Misses\n\t}\n\treturn 0\n}\n\nfunc (m *MergedNamespaceStats) GetByteHits() uint64 {\n\tif m != nil && m.ByteHits != nil {\n\t\treturn *m.ByteHits\n\t}\n\treturn 0\n}\n\nfunc (m *MergedNamespaceStats) GetItems() uint64 {\n\tif m != nil && m.Items != nil {\n\t\treturn *m.Items\n\t}\n\treturn 0\n}\n\nfunc (m *MergedNamespaceStats) GetBytes() uint64 {\n\tif m != nil && m.Bytes != nil {\n\t\treturn *m.Bytes\n\t}\n\treturn 0\n}\n\nfunc (m *MergedNamespaceStats) GetOldestItemAge() uint32 {\n\tif m != nil && m.OldestItemAge != nil {\n\t\treturn *m.OldestItemAge\n\t}\n\treturn 0\n}\n\ntype MemcacheStatsResponse struct {\n\tStats            *MergedNamespaceStats `protobuf:\"bytes,1,opt,name=stats\" json:\"stats,omitempty\"`\n\tXXX_unrecognized []byte                `json:\"-\"`\n}\n\nfunc (m *MemcacheStatsResponse) Reset()         { *m = MemcacheStatsResponse{} }\nfunc (m *MemcacheStatsResponse) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheStatsResponse) ProtoMessage()    {}\n\nfunc (m *MemcacheStatsResponse) GetStats() *MergedNamespaceStats {\n\tif m != nil {\n\t\treturn m.Stats\n\t}\n\treturn nil\n}\n\ntype MemcacheGrabTailRequest struct {\n\tItemCount        *int32       `protobuf:\"varint,1,req,name=item_count\" json:\"item_count,omitempty\"`\n\tNameSpace        *string      `protobuf:\"bytes,2,opt,name=name_space,def=\" json:\"name_space,omitempty\"`\n\tOverride         
*AppOverride `protobuf:\"bytes,3,opt,name=override\" json:\"override,omitempty\"`\n\tXXX_unrecognized []byte       `json:\"-\"`\n}\n\nfunc (m *MemcacheGrabTailRequest) Reset()         { *m = MemcacheGrabTailRequest{} }\nfunc (m *MemcacheGrabTailRequest) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheGrabTailRequest) ProtoMessage()    {}\n\nfunc (m *MemcacheGrabTailRequest) GetItemCount() int32 {\n\tif m != nil && m.ItemCount != nil {\n\t\treturn *m.ItemCount\n\t}\n\treturn 0\n}\n\nfunc (m *MemcacheGrabTailRequest) GetNameSpace() string {\n\tif m != nil && m.NameSpace != nil {\n\t\treturn *m.NameSpace\n\t}\n\treturn \"\"\n}\n\nfunc (m *MemcacheGrabTailRequest) GetOverride() *AppOverride {\n\tif m != nil {\n\t\treturn m.Override\n\t}\n\treturn nil\n}\n\ntype MemcacheGrabTailResponse struct {\n\tItem             []*MemcacheGrabTailResponse_Item `protobuf:\"group,1,rep,name=Item\" json:\"item,omitempty\"`\n\tXXX_unrecognized []byte                           `json:\"-\"`\n}\n\nfunc (m *MemcacheGrabTailResponse) Reset()         { *m = MemcacheGrabTailResponse{} }\nfunc (m *MemcacheGrabTailResponse) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheGrabTailResponse) ProtoMessage()    {}\n\nfunc (m *MemcacheGrabTailResponse) GetItem() []*MemcacheGrabTailResponse_Item {\n\tif m != nil {\n\t\treturn m.Item\n\t}\n\treturn nil\n}\n\ntype MemcacheGrabTailResponse_Item struct {\n\tValue            []byte  `protobuf:\"bytes,2,req,name=value\" json:\"value,omitempty\"`\n\tFlags            *uint32 `protobuf:\"fixed32,3,opt,name=flags\" json:\"flags,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *MemcacheGrabTailResponse_Item) Reset()         { *m = MemcacheGrabTailResponse_Item{} }\nfunc (m *MemcacheGrabTailResponse_Item) String() string { return proto.CompactTextString(m) }\nfunc (*MemcacheGrabTailResponse_Item) ProtoMessage()    {}\n\nfunc (m *MemcacheGrabTailResponse_Item) GetValue() []byte {\n\tif m != nil 
{\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\nfunc (m *MemcacheGrabTailResponse_Item) GetFlags() uint32 {\n\tif m != nil && m.Flags != nil {\n\t\treturn *m.Flags\n\t}\n\treturn 0\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"memcache\";\n\npackage appengine;\n\nmessage MemcacheServiceError {\n  enum ErrorCode {\n    OK = 0;\n    UNSPECIFIED_ERROR = 1;\n    NAMESPACE_NOT_SET = 2;\n    PERMISSION_DENIED = 3;\n    INVALID_VALUE = 6;\n  }\n}\n\nmessage AppOverride {\n  required string app_id = 1;\n\n  optional int32 num_memcacheg_backends = 2 [deprecated=true];\n  optional bool ignore_shardlock = 3 [deprecated=true];\n  optional string memcache_pool_hint = 4 [deprecated=true];\n  optional bytes memcache_sharding_strategy = 5 [deprecated=true];\n}\n\nmessage MemcacheGetRequest {\n  repeated bytes key = 1;\n  optional string name_space = 2 [default = \"\"];\n  optional bool for_cas = 4;\n  optional AppOverride override = 5;\n}\n\nmessage MemcacheGetResponse {\n  repeated group Item = 1 {\n    required bytes key = 2;\n    required bytes value = 3;\n    optional fixed32 flags = 4;\n    optional fixed64 cas_id = 5;\n    optional int32 expires_in_seconds = 6;\n  }\n}\n\nmessage MemcacheSetRequest {\n  enum SetPolicy {\n    SET = 1;\n    ADD = 2;\n    REPLACE = 3;\n    CAS = 4;\n  }\n  repeated group Item = 1 {\n    required bytes key = 2;\n    required bytes value = 3;\n\n    optional fixed32 flags = 4;\n    optional SetPolicy set_policy = 5 [default = SET];\n    optional fixed32 expiration_time = 6 [default = 0];\n\n    optional fixed64 cas_id = 8;\n    optional bool for_cas = 9;\n  }\n  optional string name_space = 7 [default = \"\"];\n  optional AppOverride override = 10;\n}\n\nmessage MemcacheSetResponse {\n  enum SetStatusCode {\n    STORED = 1;\n    NOT_STORED = 2;\n    ERROR = 3;\n    EXISTS = 4;\n  }\n  repeated SetStatusCode set_status = 1;\n}\n\nmessage MemcacheDeleteRequest {\n  repeated group Item = 1 {\n    required bytes key = 2;\n    optional fixed32 delete_time = 3 [default = 0];\n  }\n  optional string name_space = 4 [default = \"\"];\n  optional AppOverride override = 5;\n}\n\nmessage MemcacheDeleteResponse {\n  enum 
DeleteStatusCode {\n    DELETED = 1;\n    NOT_FOUND = 2;\n  }\n  repeated DeleteStatusCode delete_status = 1;\n}\n\nmessage MemcacheIncrementRequest {\n  enum Direction {\n    INCREMENT = 1;\n    DECREMENT = 2;\n  }\n  required bytes key = 1;\n  optional string name_space = 4 [default = \"\"];\n\n  optional uint64 delta = 2 [default = 1];\n  optional Direction direction = 3 [default = INCREMENT];\n\n  optional uint64 initial_value = 5;\n  optional fixed32 initial_flags = 6;\n  optional AppOverride override = 7;\n}\n\nmessage MemcacheIncrementResponse {\n  enum IncrementStatusCode {\n    OK = 1;\n    NOT_CHANGED = 2;\n    ERROR = 3;\n  }\n\n  optional uint64 new_value = 1;\n  optional IncrementStatusCode increment_status = 2;\n}\n\nmessage MemcacheBatchIncrementRequest {\n  optional string name_space = 1 [default = \"\"];\n  repeated MemcacheIncrementRequest item = 2;\n  optional AppOverride override = 3;\n}\n\nmessage MemcacheBatchIncrementResponse {\n  repeated MemcacheIncrementResponse item = 1;\n}\n\nmessage MemcacheFlushRequest {\n  optional AppOverride override = 1;\n}\n\nmessage MemcacheFlushResponse {\n}\n\nmessage MemcacheStatsRequest {\n  optional AppOverride override = 1;\n}\n\nmessage MergedNamespaceStats {\n  required uint64 hits = 1;\n  required uint64 misses = 2;\n  required uint64 byte_hits = 3;\n\n  required uint64 items = 4;\n  required uint64 bytes = 5;\n\n  required fixed32 oldest_item_age = 6;\n}\n\nmessage MemcacheStatsResponse {\n  optional MergedNamespaceStats stats = 1;\n}\n\nmessage MemcacheGrabTailRequest {\n  required int32 item_count = 1;\n  optional string name_space = 2 [default = \"\"];\n  optional AppOverride override = 3;\n}\n\nmessage MemcacheGrabTailResponse {\n  repeated group Item = 1 {\n    required bytes value = 2;\n    optional fixed32 flags = 3;\n  }\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/metadata.go",
    "content": "// Copyright 2014 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage internal\n\n// This file has code for accessing metadata.\n//\n// References:\n//\thttps://cloud.google.com/compute/docs/metadata\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net/http\"\n\t\"net/url\"\n)\n\nconst (\n\tmetadataHost = \"metadata\"\n\tmetadataPath = \"/computeMetadata/v1/\"\n)\n\nvar (\n\tmetadataRequestHeaders = http.Header{\n\t\t\"X-Google-Metadata-Request\": []string{\"True\"},\n\t}\n)\n\n// TODO(dsymonds): Do we need to support default values, like Python?\nfunc mustGetMetadata(key string) []byte {\n\tb, err := getMetadata(key)\n\tif err != nil {\n\t\tlog.Fatalf(\"Metadata fetch failed: %v\", err)\n\t}\n\treturn b\n}\n\nfunc getMetadata(key string) ([]byte, error) {\n\t// TODO(dsymonds): May need to use url.Parse to support keys with query args.\n\treq := &http.Request{\n\t\tMethod: \"GET\",\n\t\tURL: &url.URL{\n\t\t\tScheme: \"http\",\n\t\t\tHost:   metadataHost,\n\t\t\tPath:   metadataPath + key,\n\t\t},\n\t\tHeader: metadataRequestHeaders,\n\t\tHost:   metadataHost,\n\t}\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"metadata server returned HTTP %d\", resp.StatusCode)\n\t}\n\treturn ioutil.ReadAll(resp.Body)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/modules/modules_service.proto\n// DO NOT EDIT!\n\n/*\nPackage modules is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/modules/modules_service.proto\n\nIt has these top-level messages:\n\tModulesServiceError\n\tGetModulesRequest\n\tGetModulesResponse\n\tGetVersionsRequest\n\tGetVersionsResponse\n\tGetDefaultVersionRequest\n\tGetDefaultVersionResponse\n\tGetNumInstancesRequest\n\tGetNumInstancesResponse\n\tSetNumInstancesRequest\n\tSetNumInstancesResponse\n\tStartModuleRequest\n\tStartModuleResponse\n\tStopModuleRequest\n\tStopModuleResponse\n\tGetHostnameRequest\n\tGetHostnameResponse\n*/\npackage modules\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype ModulesServiceError_ErrorCode int32\n\nconst (\n\tModulesServiceError_OK                ModulesServiceError_ErrorCode = 0\n\tModulesServiceError_INVALID_MODULE    ModulesServiceError_ErrorCode = 1\n\tModulesServiceError_INVALID_VERSION   ModulesServiceError_ErrorCode = 2\n\tModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3\n\tModulesServiceError_TRANSIENT_ERROR   ModulesServiceError_ErrorCode = 4\n\tModulesServiceError_UNEXPECTED_STATE  ModulesServiceError_ErrorCode = 5\n)\n\nvar ModulesServiceError_ErrorCode_name = map[int32]string{\n\t0: \"OK\",\n\t1: \"INVALID_MODULE\",\n\t2: \"INVALID_VERSION\",\n\t3: \"INVALID_INSTANCES\",\n\t4: \"TRANSIENT_ERROR\",\n\t5: \"UNEXPECTED_STATE\",\n}\nvar ModulesServiceError_ErrorCode_value = map[string]int32{\n\t\"OK\":                0,\n\t\"INVALID_MODULE\":    1,\n\t\"INVALID_VERSION\":   2,\n\t\"INVALID_INSTANCES\": 3,\n\t\"TRANSIENT_ERROR\":   4,\n\t\"UNEXPECTED_STATE\":  5,\n}\n\nfunc (x 
ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode {\n\tp := new(ModulesServiceError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x ModulesServiceError_ErrorCode) String() string {\n\treturn proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x))\n}\nfunc (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, \"ModulesServiceError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = ModulesServiceError_ErrorCode(value)\n\treturn nil\n}\n\ntype ModulesServiceError struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *ModulesServiceError) Reset()         { *m = ModulesServiceError{} }\nfunc (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }\nfunc (*ModulesServiceError) ProtoMessage()    {}\n\ntype GetModulesRequest struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *GetModulesRequest) Reset()         { *m = GetModulesRequest{} }\nfunc (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetModulesRequest) ProtoMessage()    {}\n\ntype GetModulesResponse struct {\n\tModule           []string `protobuf:\"bytes,1,rep,name=module\" json:\"module,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *GetModulesResponse) Reset()         { *m = GetModulesResponse{} }\nfunc (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }\nfunc (*GetModulesResponse) ProtoMessage()    {}\n\nfunc (m *GetModulesResponse) GetModule() []string {\n\tif m != nil {\n\t\treturn m.Module\n\t}\n\treturn nil\n}\n\ntype GetVersionsRequest struct {\n\tModule           *string `protobuf:\"bytes,1,opt,name=module\" json:\"module,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *GetVersionsRequest) Reset()         { *m = GetVersionsRequest{} }\nfunc (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }\nfunc 
(*GetVersionsRequest) ProtoMessage()    {}\n\nfunc (m *GetVersionsRequest) GetModule() string {\n\tif m != nil && m.Module != nil {\n\t\treturn *m.Module\n\t}\n\treturn \"\"\n}\n\ntype GetVersionsResponse struct {\n\tVersion          []string `protobuf:\"bytes,1,rep,name=version\" json:\"version,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *GetVersionsResponse) Reset()         { *m = GetVersionsResponse{} }\nfunc (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }\nfunc (*GetVersionsResponse) ProtoMessage()    {}\n\nfunc (m *GetVersionsResponse) GetVersion() []string {\n\tif m != nil {\n\t\treturn m.Version\n\t}\n\treturn nil\n}\n\ntype GetDefaultVersionRequest struct {\n\tModule           *string `protobuf:\"bytes,1,opt,name=module\" json:\"module,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *GetDefaultVersionRequest) Reset()         { *m = GetDefaultVersionRequest{} }\nfunc (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetDefaultVersionRequest) ProtoMessage()    {}\n\nfunc (m *GetDefaultVersionRequest) GetModule() string {\n\tif m != nil && m.Module != nil {\n\t\treturn *m.Module\n\t}\n\treturn \"\"\n}\n\ntype GetDefaultVersionResponse struct {\n\tVersion          *string `protobuf:\"bytes,1,req,name=version\" json:\"version,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *GetDefaultVersionResponse) Reset()         { *m = GetDefaultVersionResponse{} }\nfunc (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }\nfunc (*GetDefaultVersionResponse) ProtoMessage()    {}\n\nfunc (m *GetDefaultVersionResponse) GetVersion() string {\n\tif m != nil && m.Version != nil {\n\t\treturn *m.Version\n\t}\n\treturn \"\"\n}\n\ntype GetNumInstancesRequest struct {\n\tModule           *string `protobuf:\"bytes,1,opt,name=module\" json:\"module,omitempty\"`\n\tVersion          *string 
`protobuf:\"bytes,2,opt,name=version\" json:\"version,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *GetNumInstancesRequest) Reset()         { *m = GetNumInstancesRequest{} }\nfunc (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetNumInstancesRequest) ProtoMessage()    {}\n\nfunc (m *GetNumInstancesRequest) GetModule() string {\n\tif m != nil && m.Module != nil {\n\t\treturn *m.Module\n\t}\n\treturn \"\"\n}\n\nfunc (m *GetNumInstancesRequest) GetVersion() string {\n\tif m != nil && m.Version != nil {\n\t\treturn *m.Version\n\t}\n\treturn \"\"\n}\n\ntype GetNumInstancesResponse struct {\n\tInstances        *int64 `protobuf:\"varint,1,req,name=instances\" json:\"instances,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *GetNumInstancesResponse) Reset()         { *m = GetNumInstancesResponse{} }\nfunc (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }\nfunc (*GetNumInstancesResponse) ProtoMessage()    {}\n\nfunc (m *GetNumInstancesResponse) GetInstances() int64 {\n\tif m != nil && m.Instances != nil {\n\t\treturn *m.Instances\n\t}\n\treturn 0\n}\n\ntype SetNumInstancesRequest struct {\n\tModule           *string `protobuf:\"bytes,1,opt,name=module\" json:\"module,omitempty\"`\n\tVersion          *string `protobuf:\"bytes,2,opt,name=version\" json:\"version,omitempty\"`\n\tInstances        *int64  `protobuf:\"varint,3,req,name=instances\" json:\"instances,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *SetNumInstancesRequest) Reset()         { *m = SetNumInstancesRequest{} }\nfunc (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }\nfunc (*SetNumInstancesRequest) ProtoMessage()    {}\n\nfunc (m *SetNumInstancesRequest) GetModule() string {\n\tif m != nil && m.Module != nil {\n\t\treturn *m.Module\n\t}\n\treturn \"\"\n}\n\nfunc (m *SetNumInstancesRequest) GetVersion() string {\n\tif m != nil && m.Version 
!= nil {\n\t\treturn *m.Version\n\t}\n\treturn \"\"\n}\n\nfunc (m *SetNumInstancesRequest) GetInstances() int64 {\n\tif m != nil && m.Instances != nil {\n\t\treturn *m.Instances\n\t}\n\treturn 0\n}\n\ntype SetNumInstancesResponse struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *SetNumInstancesResponse) Reset()         { *m = SetNumInstancesResponse{} }\nfunc (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }\nfunc (*SetNumInstancesResponse) ProtoMessage()    {}\n\ntype StartModuleRequest struct {\n\tModule           *string `protobuf:\"bytes,1,req,name=module\" json:\"module,omitempty\"`\n\tVersion          *string `protobuf:\"bytes,2,req,name=version\" json:\"version,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *StartModuleRequest) Reset()         { *m = StartModuleRequest{} }\nfunc (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }\nfunc (*StartModuleRequest) ProtoMessage()    {}\n\nfunc (m *StartModuleRequest) GetModule() string {\n\tif m != nil && m.Module != nil {\n\t\treturn *m.Module\n\t}\n\treturn \"\"\n}\n\nfunc (m *StartModuleRequest) GetVersion() string {\n\tif m != nil && m.Version != nil {\n\t\treturn *m.Version\n\t}\n\treturn \"\"\n}\n\ntype StartModuleResponse struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *StartModuleResponse) Reset()         { *m = StartModuleResponse{} }\nfunc (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }\nfunc (*StartModuleResponse) ProtoMessage()    {}\n\ntype StopModuleRequest struct {\n\tModule           *string `protobuf:\"bytes,1,opt,name=module\" json:\"module,omitempty\"`\n\tVersion          *string `protobuf:\"bytes,2,opt,name=version\" json:\"version,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *StopModuleRequest) Reset()         { *m = StopModuleRequest{} }\nfunc (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }\nfunc 
(*StopModuleRequest) ProtoMessage()    {}\n\nfunc (m *StopModuleRequest) GetModule() string {\n\tif m != nil && m.Module != nil {\n\t\treturn *m.Module\n\t}\n\treturn \"\"\n}\n\nfunc (m *StopModuleRequest) GetVersion() string {\n\tif m != nil && m.Version != nil {\n\t\treturn *m.Version\n\t}\n\treturn \"\"\n}\n\ntype StopModuleResponse struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *StopModuleResponse) Reset()         { *m = StopModuleResponse{} }\nfunc (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }\nfunc (*StopModuleResponse) ProtoMessage()    {}\n\ntype GetHostnameRequest struct {\n\tModule           *string `protobuf:\"bytes,1,opt,name=module\" json:\"module,omitempty\"`\n\tVersion          *string `protobuf:\"bytes,2,opt,name=version\" json:\"version,omitempty\"`\n\tInstance         *string `protobuf:\"bytes,3,opt,name=instance\" json:\"instance,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *GetHostnameRequest) Reset()         { *m = GetHostnameRequest{} }\nfunc (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetHostnameRequest) ProtoMessage()    {}\n\nfunc (m *GetHostnameRequest) GetModule() string {\n\tif m != nil && m.Module != nil {\n\t\treturn *m.Module\n\t}\n\treturn \"\"\n}\n\nfunc (m *GetHostnameRequest) GetVersion() string {\n\tif m != nil && m.Version != nil {\n\t\treturn *m.Version\n\t}\n\treturn \"\"\n}\n\nfunc (m *GetHostnameRequest) GetInstance() string {\n\tif m != nil && m.Instance != nil {\n\t\treturn *m.Instance\n\t}\n\treturn \"\"\n}\n\ntype GetHostnameResponse struct {\n\tHostname         *string `protobuf:\"bytes,1,req,name=hostname\" json:\"hostname,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *GetHostnameResponse) Reset()         { *m = GetHostnameResponse{} }\nfunc (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }\nfunc (*GetHostnameResponse) ProtoMessage()    {}\n\nfunc (m 
*GetHostnameResponse) GetHostname() string {\n\tif m != nil && m.Hostname != nil {\n\t\treturn *m.Hostname\n\t}\n\treturn \"\"\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/modules/modules_service.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"modules\";\n\npackage appengine;\n\nmessage ModulesServiceError {\n  enum ErrorCode {\n    OK  = 0;\n    INVALID_MODULE = 1;\n    INVALID_VERSION = 2;\n    INVALID_INSTANCES = 3;\n    TRANSIENT_ERROR = 4;\n    UNEXPECTED_STATE = 5;\n  }\n}\n\nmessage GetModulesRequest {\n}\n\nmessage GetModulesResponse {\n  repeated string module = 1;\n}\n\nmessage GetVersionsRequest {\n  optional string module = 1;\n}\n\nmessage GetVersionsResponse {\n  repeated string version = 1;\n}\n\nmessage GetDefaultVersionRequest {\n  optional string module = 1;\n}\n\nmessage GetDefaultVersionResponse {\n  required string version = 1;\n}\n\nmessage GetNumInstancesRequest {\n  optional string module = 1;\n  optional string version = 2;\n}\n\nmessage GetNumInstancesResponse {\n  required int64 instances = 1;\n}\n\nmessage SetNumInstancesRequest {\n  optional string module = 1;\n  optional string version = 2;\n  required int64 instances = 3;\n}\n\nmessage SetNumInstancesResponse {}\n\nmessage StartModuleRequest {\n  required string module = 1;\n  required string version = 2;\n}\n\nmessage StartModuleResponse {}\n\nmessage StopModuleRequest {\n  optional string module = 1;\n  optional string version = 2;\n}\n\nmessage StopModuleResponse {}\n\nmessage GetHostnameRequest {\n  optional string module = 1;\n  optional string version = 2;\n  optional string instance = 3;\n}\n\nmessage GetHostnameResponse {\n  required string hostname = 1;\n}\n\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/net.go",
    "content": "// Copyright 2014 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage internal\n\n// This file implements a network dialer that limits the number of concurrent connections.\n// It is only used for API calls.\n\nimport (\n\t\"log\"\n\t\"net\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.\n\nfunc limitRelease() {\n\t// non-blocking\n\tselect {\n\tcase <-limitSem:\n\tdefault:\n\t\t// This should not normally happen.\n\t\tlog.Print(\"appengine: unbalanced limitSem release!\")\n\t}\n}\n\nfunc limitDial(network, addr string) (net.Conn, error) {\n\tlimitSem <- 1\n\n\t// Dial with a timeout in case the API host is MIA.\n\t// The connection should normally be very fast.\n\tconn, err := net.DialTimeout(network, addr, 500*time.Millisecond)\n\tif err != nil {\n\t\tlimitRelease()\n\t\treturn nil, err\n\t}\n\tlc := &limitConn{Conn: conn}\n\truntime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required\n\treturn lc, nil\n}\n\ntype limitConn struct {\n\tclose sync.Once\n\tnet.Conn\n}\n\nfunc (lc *limitConn) Close() error {\n\tdefer lc.close.Do(func() {\n\t\tlimitRelease()\n\t\truntime.SetFinalizer(lc, nil)\n\t})\n\treturn lc.Conn.Close()\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/regen.sh",
    "content": "#!/bin/bash -e\n#\n# This script rebuilds the generated code for the protocol buffers.\n# To run this you will need protoc and goprotobuf installed;\n# see https://github.com/golang/protobuf for instructions.\n\nPKG=google.golang.org/appengine\n\nfunction die() {\n\techo 1>&2 $*\n\texit 1\n}\n\n# Sanity check that the right tools are accessible.\nfor tool in go protoc protoc-gen-go; do\n\tq=$(which $tool) || die \"didn't find $tool\"\n\techo 1>&2 \"$tool: $q\"\ndone\n\necho -n 1>&2 \"finding package dir... \"\npkgdir=$(go list -f '{{.Dir}}' $PKG)\necho 1>&2 $pkgdir\nbase=$(echo $pkgdir | sed \"s,/$PKG\\$,,\")\necho 1>&2 \"base: $base\"\ncd $base\n\n# Run protoc once per package.\nfor dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do\n\techo 1>&2 \"* $dir\"\n\tprotoc --go_out=. $dir/*.proto\ndone\n\nfor f in $(find $PKG/internal -name '*.pb.go'); do\n  # Remove proto.RegisterEnum calls.\n  # These cause duplicate registration panics when these packages\n  # are used on classic App Engine. proto.RegisterEnum only affects\n  # parsing the text format; we don't care about that.\n  # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17\n  sed -i '/proto.RegisterEnum/d' $f\ndone\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/remote_api/remote_api.proto\n// DO NOT EDIT!\n\n/*\nPackage remote_api is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/remote_api/remote_api.proto\n\nIt has these top-level messages:\n\tRequest\n\tApplicationError\n\tRpcError\n\tResponse\n*/\npackage remote_api\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype RpcError_ErrorCode int32\n\nconst (\n\tRpcError_UNKNOWN             RpcError_ErrorCode = 0\n\tRpcError_CALL_NOT_FOUND      RpcError_ErrorCode = 1\n\tRpcError_PARSE_ERROR         RpcError_ErrorCode = 2\n\tRpcError_SECURITY_VIOLATION  RpcError_ErrorCode = 3\n\tRpcError_OVER_QUOTA          RpcError_ErrorCode = 4\n\tRpcError_REQUEST_TOO_LARGE   RpcError_ErrorCode = 5\n\tRpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6\n\tRpcError_FEATURE_DISABLED    RpcError_ErrorCode = 7\n\tRpcError_BAD_REQUEST         RpcError_ErrorCode = 8\n\tRpcError_RESPONSE_TOO_LARGE  RpcError_ErrorCode = 9\n\tRpcError_CANCELLED           RpcError_ErrorCode = 10\n\tRpcError_REPLAY_ERROR        RpcError_ErrorCode = 11\n\tRpcError_DEADLINE_EXCEEDED   RpcError_ErrorCode = 12\n)\n\nvar RpcError_ErrorCode_name = map[int32]string{\n\t0:  \"UNKNOWN\",\n\t1:  \"CALL_NOT_FOUND\",\n\t2:  \"PARSE_ERROR\",\n\t3:  \"SECURITY_VIOLATION\",\n\t4:  \"OVER_QUOTA\",\n\t5:  \"REQUEST_TOO_LARGE\",\n\t6:  \"CAPABILITY_DISABLED\",\n\t7:  \"FEATURE_DISABLED\",\n\t8:  \"BAD_REQUEST\",\n\t9:  \"RESPONSE_TOO_LARGE\",\n\t10: \"CANCELLED\",\n\t11: \"REPLAY_ERROR\",\n\t12: \"DEADLINE_EXCEEDED\",\n}\nvar RpcError_ErrorCode_value = map[string]int32{\n\t\"UNKNOWN\":             0,\n\t\"CALL_NOT_FOUND\":      1,\n\t\"PARSE_ERROR\":         2,\n\t\"SECURITY_VIOLATION\": 
 3,\n\t\"OVER_QUOTA\":          4,\n\t\"REQUEST_TOO_LARGE\":   5,\n\t\"CAPABILITY_DISABLED\": 6,\n\t\"FEATURE_DISABLED\":    7,\n\t\"BAD_REQUEST\":         8,\n\t\"RESPONSE_TOO_LARGE\":  9,\n\t\"CANCELLED\":           10,\n\t\"REPLAY_ERROR\":        11,\n\t\"DEADLINE_EXCEEDED\":   12,\n}\n\nfunc (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {\n\tp := new(RpcError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x RpcError_ErrorCode) String() string {\n\treturn proto.EnumName(RpcError_ErrorCode_name, int32(x))\n}\nfunc (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, \"RpcError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = RpcError_ErrorCode(value)\n\treturn nil\n}\n\ntype Request struct {\n\tServiceName      *string `protobuf:\"bytes,2,req,name=service_name\" json:\"service_name,omitempty\"`\n\tMethod           *string `protobuf:\"bytes,3,req,name=method\" json:\"method,omitempty\"`\n\tRequest          []byte  `protobuf:\"bytes,4,req,name=request\" json:\"request,omitempty\"`\n\tRequestId        *string `protobuf:\"bytes,5,opt,name=request_id\" json:\"request_id,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *Request) Reset()         { *m = Request{} }\nfunc (m *Request) String() string { return proto.CompactTextString(m) }\nfunc (*Request) ProtoMessage()    {}\n\nfunc (m *Request) GetServiceName() string {\n\tif m != nil && m.ServiceName != nil {\n\t\treturn *m.ServiceName\n\t}\n\treturn \"\"\n}\n\nfunc (m *Request) GetMethod() string {\n\tif m != nil && m.Method != nil {\n\t\treturn *m.Method\n\t}\n\treturn \"\"\n}\n\nfunc (m *Request) GetRequest() []byte {\n\tif m != nil {\n\t\treturn m.Request\n\t}\n\treturn nil\n}\n\nfunc (m *Request) GetRequestId() string {\n\tif m != nil && m.RequestId != nil {\n\t\treturn *m.RequestId\n\t}\n\treturn \"\"\n}\n\ntype ApplicationError struct {\n\tCode             *int32  `protobuf:\"varint,1,req,name=code\" 
json:\"code,omitempty\"`\n\tDetail           *string `protobuf:\"bytes,2,req,name=detail\" json:\"detail,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *ApplicationError) Reset()         { *m = ApplicationError{} }\nfunc (m *ApplicationError) String() string { return proto.CompactTextString(m) }\nfunc (*ApplicationError) ProtoMessage()    {}\n\nfunc (m *ApplicationError) GetCode() int32 {\n\tif m != nil && m.Code != nil {\n\t\treturn *m.Code\n\t}\n\treturn 0\n}\n\nfunc (m *ApplicationError) GetDetail() string {\n\tif m != nil && m.Detail != nil {\n\t\treturn *m.Detail\n\t}\n\treturn \"\"\n}\n\ntype RpcError struct {\n\tCode             *int32  `protobuf:\"varint,1,req,name=code\" json:\"code,omitempty\"`\n\tDetail           *string `protobuf:\"bytes,2,opt,name=detail\" json:\"detail,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *RpcError) Reset()         { *m = RpcError{} }\nfunc (m *RpcError) String() string { return proto.CompactTextString(m) }\nfunc (*RpcError) ProtoMessage()    {}\n\nfunc (m *RpcError) GetCode() int32 {\n\tif m != nil && m.Code != nil {\n\t\treturn *m.Code\n\t}\n\treturn 0\n}\n\nfunc (m *RpcError) GetDetail() string {\n\tif m != nil && m.Detail != nil {\n\t\treturn *m.Detail\n\t}\n\treturn \"\"\n}\n\ntype Response struct {\n\tResponse         []byte            `protobuf:\"bytes,1,opt,name=response\" json:\"response,omitempty\"`\n\tException        []byte            `protobuf:\"bytes,2,opt,name=exception\" json:\"exception,omitempty\"`\n\tApplicationError *ApplicationError `protobuf:\"bytes,3,opt,name=application_error\" json:\"application_error,omitempty\"`\n\tJavaException    []byte            `protobuf:\"bytes,4,opt,name=java_exception\" json:\"java_exception,omitempty\"`\n\tRpcError         *RpcError         `protobuf:\"bytes,5,opt,name=rpc_error\" json:\"rpc_error,omitempty\"`\n\tXXX_unrecognized []byte            `json:\"-\"`\n}\n\nfunc (m *Response) Reset()         { *m = Response{} }\nfunc (m 
*Response) String() string { return proto.CompactTextString(m) }\nfunc (*Response) ProtoMessage()    {}\n\nfunc (m *Response) GetResponse() []byte {\n\tif m != nil {\n\t\treturn m.Response\n\t}\n\treturn nil\n}\n\nfunc (m *Response) GetException() []byte {\n\tif m != nil {\n\t\treturn m.Exception\n\t}\n\treturn nil\n}\n\nfunc (m *Response) GetApplicationError() *ApplicationError {\n\tif m != nil {\n\t\treturn m.ApplicationError\n\t}\n\treturn nil\n}\n\nfunc (m *Response) GetJavaException() []byte {\n\tif m != nil {\n\t\treturn m.JavaException\n\t}\n\treturn nil\n}\n\nfunc (m *Response) GetRpcError() *RpcError {\n\tif m != nil {\n\t\treturn m.RpcError\n\t}\n\treturn nil\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"remote_api\";\n\npackage remote_api;\n\nmessage Request {\n  required string service_name = 2;\n  required string method = 3;\n  required bytes request = 4;\n  optional string request_id = 5;\n}\n\nmessage ApplicationError {\n  required int32 code = 1;\n  required string detail = 2;\n}\n\nmessage RpcError {\n  enum ErrorCode {\n    UNKNOWN = 0;\n    CALL_NOT_FOUND = 1;\n    PARSE_ERROR = 2;\n    SECURITY_VIOLATION = 3;\n    OVER_QUOTA = 4;\n    REQUEST_TOO_LARGE = 5;\n    CAPABILITY_DISABLED = 6;\n    FEATURE_DISABLED = 7;\n    BAD_REQUEST = 8;\n    RESPONSE_TOO_LARGE = 9;\n    CANCELLED = 10;\n    REPLAY_ERROR = 11;\n    DEADLINE_EXCEEDED = 12;\n  }\n  required int32 code = 1;\n  optional string detail = 2;\n}\n\nmessage Response {\n  optional bytes response = 1;\n  optional bytes exception = 2;\n  optional ApplicationError application_error = 3;\n  optional bytes java_exception = 4;\n  optional RpcError rpc_error = 5;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/search/search.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/search/search.proto\n// DO NOT EDIT!\n\n/*\nPackage search is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/search/search.proto\n\nIt has these top-level messages:\n\tScope\n\tEntry\n\tAccessControlList\n\tFieldValue\n\tField\n\tFieldTypes\n\tIndexShardSettings\n\tFacetValue\n\tFacet\n\tDocumentMetadata\n\tDocument\n\tSearchServiceError\n\tRequestStatus\n\tIndexSpec\n\tIndexMetadata\n\tIndexDocumentParams\n\tIndexDocumentRequest\n\tIndexDocumentResponse\n\tDeleteDocumentParams\n\tDeleteDocumentRequest\n\tDeleteDocumentResponse\n\tListDocumentsParams\n\tListDocumentsRequest\n\tListDocumentsResponse\n\tListIndexesParams\n\tListIndexesRequest\n\tListIndexesResponse\n\tDeleteSchemaParams\n\tDeleteSchemaRequest\n\tDeleteSchemaResponse\n\tSortSpec\n\tScorerSpec\n\tFieldSpec\n\tFacetRange\n\tFacetRequestParam\n\tFacetAutoDetectParam\n\tFacetRequest\n\tFacetRefinement\n\tSearchParams\n\tSearchRequest\n\tFacetResultValue\n\tFacetResult\n\tSearchResult\n\tSearchResponse\n*/\npackage search\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype Scope_Type int32\n\nconst (\n\tScope_USER_BY_CANONICAL_ID    Scope_Type = 1\n\tScope_USER_BY_EMAIL           Scope_Type = 2\n\tScope_GROUP_BY_CANONICAL_ID   Scope_Type = 3\n\tScope_GROUP_BY_EMAIL          Scope_Type = 4\n\tScope_GROUP_BY_DOMAIN         Scope_Type = 5\n\tScope_ALL_USERS               Scope_Type = 6\n\tScope_ALL_AUTHENTICATED_USERS Scope_Type = 7\n)\n\nvar Scope_Type_name = map[int32]string{\n\t1: \"USER_BY_CANONICAL_ID\",\n\t2: \"USER_BY_EMAIL\",\n\t3: \"GROUP_BY_CANONICAL_ID\",\n\t4: \"GROUP_BY_EMAIL\",\n\t5: \"GROUP_BY_DOMAIN\",\n\t6: \"ALL_USERS\",\n\t7: 
\"ALL_AUTHENTICATED_USERS\",\n}\nvar Scope_Type_value = map[string]int32{\n\t\"USER_BY_CANONICAL_ID\":    1,\n\t\"USER_BY_EMAIL\":           2,\n\t\"GROUP_BY_CANONICAL_ID\":   3,\n\t\"GROUP_BY_EMAIL\":          4,\n\t\"GROUP_BY_DOMAIN\":         5,\n\t\"ALL_USERS\":               6,\n\t\"ALL_AUTHENTICATED_USERS\": 7,\n}\n\nfunc (x Scope_Type) Enum() *Scope_Type {\n\tp := new(Scope_Type)\n\t*p = x\n\treturn p\n}\nfunc (x Scope_Type) String() string {\n\treturn proto.EnumName(Scope_Type_name, int32(x))\n}\nfunc (x *Scope_Type) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(Scope_Type_value, data, \"Scope_Type\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = Scope_Type(value)\n\treturn nil\n}\n\ntype Entry_Permission int32\n\nconst (\n\tEntry_READ         Entry_Permission = 1\n\tEntry_WRITE        Entry_Permission = 2\n\tEntry_FULL_CONTROL Entry_Permission = 3\n)\n\nvar Entry_Permission_name = map[int32]string{\n\t1: \"READ\",\n\t2: \"WRITE\",\n\t3: \"FULL_CONTROL\",\n}\nvar Entry_Permission_value = map[string]int32{\n\t\"READ\":         1,\n\t\"WRITE\":        2,\n\t\"FULL_CONTROL\": 3,\n}\n\nfunc (x Entry_Permission) Enum() *Entry_Permission {\n\tp := new(Entry_Permission)\n\t*p = x\n\treturn p\n}\nfunc (x Entry_Permission) String() string {\n\treturn proto.EnumName(Entry_Permission_name, int32(x))\n}\nfunc (x *Entry_Permission) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(Entry_Permission_value, data, \"Entry_Permission\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = Entry_Permission(value)\n\treturn nil\n}\n\ntype FieldValue_ContentType int32\n\nconst (\n\tFieldValue_TEXT   FieldValue_ContentType = 0\n\tFieldValue_HTML   FieldValue_ContentType = 1\n\tFieldValue_ATOM   FieldValue_ContentType = 2\n\tFieldValue_DATE   FieldValue_ContentType = 3\n\tFieldValue_NUMBER FieldValue_ContentType = 4\n\tFieldValue_GEO    FieldValue_ContentType = 5\n)\n\nvar FieldValue_ContentType_name = map[int32]string{\n\t0: 
\"TEXT\",\n\t1: \"HTML\",\n\t2: \"ATOM\",\n\t3: \"DATE\",\n\t4: \"NUMBER\",\n\t5: \"GEO\",\n}\nvar FieldValue_ContentType_value = map[string]int32{\n\t\"TEXT\":   0,\n\t\"HTML\":   1,\n\t\"ATOM\":   2,\n\t\"DATE\":   3,\n\t\"NUMBER\": 4,\n\t\"GEO\":    5,\n}\n\nfunc (x FieldValue_ContentType) Enum() *FieldValue_ContentType {\n\tp := new(FieldValue_ContentType)\n\t*p = x\n\treturn p\n}\nfunc (x FieldValue_ContentType) String() string {\n\treturn proto.EnumName(FieldValue_ContentType_name, int32(x))\n}\nfunc (x *FieldValue_ContentType) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(FieldValue_ContentType_value, data, \"FieldValue_ContentType\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = FieldValue_ContentType(value)\n\treturn nil\n}\n\ntype FacetValue_ContentType int32\n\nconst (\n\tFacetValue_ATOM   FacetValue_ContentType = 2\n\tFacetValue_NUMBER FacetValue_ContentType = 4\n)\n\nvar FacetValue_ContentType_name = map[int32]string{\n\t2: \"ATOM\",\n\t4: \"NUMBER\",\n}\nvar FacetValue_ContentType_value = map[string]int32{\n\t\"ATOM\":   2,\n\t\"NUMBER\": 4,\n}\n\nfunc (x FacetValue_ContentType) Enum() *FacetValue_ContentType {\n\tp := new(FacetValue_ContentType)\n\t*p = x\n\treturn p\n}\nfunc (x FacetValue_ContentType) String() string {\n\treturn proto.EnumName(FacetValue_ContentType_name, int32(x))\n}\nfunc (x *FacetValue_ContentType) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(FacetValue_ContentType_value, data, \"FacetValue_ContentType\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = FacetValue_ContentType(value)\n\treturn nil\n}\n\ntype Document_Storage int32\n\nconst (\n\tDocument_DISK Document_Storage = 0\n)\n\nvar Document_Storage_name = map[int32]string{\n\t0: \"DISK\",\n}\nvar Document_Storage_value = map[string]int32{\n\t\"DISK\": 0,\n}\n\nfunc (x Document_Storage) Enum() *Document_Storage {\n\tp := new(Document_Storage)\n\t*p = x\n\treturn p\n}\nfunc (x Document_Storage) String() string 
{\n\treturn proto.EnumName(Document_Storage_name, int32(x))\n}\nfunc (x *Document_Storage) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(Document_Storage_value, data, \"Document_Storage\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = Document_Storage(value)\n\treturn nil\n}\n\ntype SearchServiceError_ErrorCode int32\n\nconst (\n\tSearchServiceError_OK                     SearchServiceError_ErrorCode = 0\n\tSearchServiceError_INVALID_REQUEST        SearchServiceError_ErrorCode = 1\n\tSearchServiceError_TRANSIENT_ERROR        SearchServiceError_ErrorCode = 2\n\tSearchServiceError_INTERNAL_ERROR         SearchServiceError_ErrorCode = 3\n\tSearchServiceError_PERMISSION_DENIED      SearchServiceError_ErrorCode = 4\n\tSearchServiceError_TIMEOUT                SearchServiceError_ErrorCode = 5\n\tSearchServiceError_CONCURRENT_TRANSACTION SearchServiceError_ErrorCode = 6\n)\n\nvar SearchServiceError_ErrorCode_name = map[int32]string{\n\t0: \"OK\",\n\t1: \"INVALID_REQUEST\",\n\t2: \"TRANSIENT_ERROR\",\n\t3: \"INTERNAL_ERROR\",\n\t4: \"PERMISSION_DENIED\",\n\t5: \"TIMEOUT\",\n\t6: \"CONCURRENT_TRANSACTION\",\n}\nvar SearchServiceError_ErrorCode_value = map[string]int32{\n\t\"OK\":                     0,\n\t\"INVALID_REQUEST\":        1,\n\t\"TRANSIENT_ERROR\":        2,\n\t\"INTERNAL_ERROR\":         3,\n\t\"PERMISSION_DENIED\":      4,\n\t\"TIMEOUT\":                5,\n\t\"CONCURRENT_TRANSACTION\": 6,\n}\n\nfunc (x SearchServiceError_ErrorCode) Enum() *SearchServiceError_ErrorCode {\n\tp := new(SearchServiceError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x SearchServiceError_ErrorCode) String() string {\n\treturn proto.EnumName(SearchServiceError_ErrorCode_name, int32(x))\n}\nfunc (x *SearchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(SearchServiceError_ErrorCode_value, data, \"SearchServiceError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = 
SearchServiceError_ErrorCode(value)\n\treturn nil\n}\n\ntype IndexSpec_Consistency int32\n\nconst (\n\tIndexSpec_GLOBAL       IndexSpec_Consistency = 0\n\tIndexSpec_PER_DOCUMENT IndexSpec_Consistency = 1\n)\n\nvar IndexSpec_Consistency_name = map[int32]string{\n\t0: \"GLOBAL\",\n\t1: \"PER_DOCUMENT\",\n}\nvar IndexSpec_Consistency_value = map[string]int32{\n\t\"GLOBAL\":       0,\n\t\"PER_DOCUMENT\": 1,\n}\n\nfunc (x IndexSpec_Consistency) Enum() *IndexSpec_Consistency {\n\tp := new(IndexSpec_Consistency)\n\t*p = x\n\treturn p\n}\nfunc (x IndexSpec_Consistency) String() string {\n\treturn proto.EnumName(IndexSpec_Consistency_name, int32(x))\n}\nfunc (x *IndexSpec_Consistency) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(IndexSpec_Consistency_value, data, \"IndexSpec_Consistency\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = IndexSpec_Consistency(value)\n\treturn nil\n}\n\ntype IndexSpec_Source int32\n\nconst (\n\tIndexSpec_SEARCH        IndexSpec_Source = 0\n\tIndexSpec_DATASTORE     IndexSpec_Source = 1\n\tIndexSpec_CLOUD_STORAGE IndexSpec_Source = 2\n)\n\nvar IndexSpec_Source_name = map[int32]string{\n\t0: \"SEARCH\",\n\t1: \"DATASTORE\",\n\t2: \"CLOUD_STORAGE\",\n}\nvar IndexSpec_Source_value = map[string]int32{\n\t\"SEARCH\":        0,\n\t\"DATASTORE\":     1,\n\t\"CLOUD_STORAGE\": 2,\n}\n\nfunc (x IndexSpec_Source) Enum() *IndexSpec_Source {\n\tp := new(IndexSpec_Source)\n\t*p = x\n\treturn p\n}\nfunc (x IndexSpec_Source) String() string {\n\treturn proto.EnumName(IndexSpec_Source_name, int32(x))\n}\nfunc (x *IndexSpec_Source) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(IndexSpec_Source_value, data, \"IndexSpec_Source\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = IndexSpec_Source(value)\n\treturn nil\n}\n\ntype IndexSpec_Mode int32\n\nconst (\n\tIndexSpec_PRIORITY   IndexSpec_Mode = 0\n\tIndexSpec_BACKGROUND IndexSpec_Mode = 1\n)\n\nvar IndexSpec_Mode_name = map[int32]string{\n\t0: 
\"PRIORITY\",\n\t1: \"BACKGROUND\",\n}\nvar IndexSpec_Mode_value = map[string]int32{\n\t\"PRIORITY\":   0,\n\t\"BACKGROUND\": 1,\n}\n\nfunc (x IndexSpec_Mode) Enum() *IndexSpec_Mode {\n\tp := new(IndexSpec_Mode)\n\t*p = x\n\treturn p\n}\nfunc (x IndexSpec_Mode) String() string {\n\treturn proto.EnumName(IndexSpec_Mode_name, int32(x))\n}\nfunc (x *IndexSpec_Mode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(IndexSpec_Mode_value, data, \"IndexSpec_Mode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = IndexSpec_Mode(value)\n\treturn nil\n}\n\ntype IndexDocumentParams_Freshness int32\n\nconst (\n\tIndexDocumentParams_SYNCHRONOUSLY   IndexDocumentParams_Freshness = 0\n\tIndexDocumentParams_WHEN_CONVENIENT IndexDocumentParams_Freshness = 1\n)\n\nvar IndexDocumentParams_Freshness_name = map[int32]string{\n\t0: \"SYNCHRONOUSLY\",\n\t1: \"WHEN_CONVENIENT\",\n}\nvar IndexDocumentParams_Freshness_value = map[string]int32{\n\t\"SYNCHRONOUSLY\":   0,\n\t\"WHEN_CONVENIENT\": 1,\n}\n\nfunc (x IndexDocumentParams_Freshness) Enum() *IndexDocumentParams_Freshness {\n\tp := new(IndexDocumentParams_Freshness)\n\t*p = x\n\treturn p\n}\nfunc (x IndexDocumentParams_Freshness) String() string {\n\treturn proto.EnumName(IndexDocumentParams_Freshness_name, int32(x))\n}\nfunc (x *IndexDocumentParams_Freshness) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(IndexDocumentParams_Freshness_value, data, \"IndexDocumentParams_Freshness\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = IndexDocumentParams_Freshness(value)\n\treturn nil\n}\n\ntype ScorerSpec_Scorer int32\n\nconst (\n\tScorerSpec_RESCORING_MATCH_SCORER ScorerSpec_Scorer = 0\n\tScorerSpec_MATCH_SCORER           ScorerSpec_Scorer = 2\n)\n\nvar ScorerSpec_Scorer_name = map[int32]string{\n\t0: \"RESCORING_MATCH_SCORER\",\n\t2: \"MATCH_SCORER\",\n}\nvar ScorerSpec_Scorer_value = map[string]int32{\n\t\"RESCORING_MATCH_SCORER\": 0,\n\t\"MATCH_SCORER\":           2,\n}\n\nfunc (x 
ScorerSpec_Scorer) Enum() *ScorerSpec_Scorer {\n\tp := new(ScorerSpec_Scorer)\n\t*p = x\n\treturn p\n}\nfunc (x ScorerSpec_Scorer) String() string {\n\treturn proto.EnumName(ScorerSpec_Scorer_name, int32(x))\n}\nfunc (x *ScorerSpec_Scorer) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(ScorerSpec_Scorer_value, data, \"ScorerSpec_Scorer\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = ScorerSpec_Scorer(value)\n\treturn nil\n}\n\ntype SearchParams_CursorType int32\n\nconst (\n\tSearchParams_NONE       SearchParams_CursorType = 0\n\tSearchParams_SINGLE     SearchParams_CursorType = 1\n\tSearchParams_PER_RESULT SearchParams_CursorType = 2\n)\n\nvar SearchParams_CursorType_name = map[int32]string{\n\t0: \"NONE\",\n\t1: \"SINGLE\",\n\t2: \"PER_RESULT\",\n}\nvar SearchParams_CursorType_value = map[string]int32{\n\t\"NONE\":       0,\n\t\"SINGLE\":     1,\n\t\"PER_RESULT\": 2,\n}\n\nfunc (x SearchParams_CursorType) Enum() *SearchParams_CursorType {\n\tp := new(SearchParams_CursorType)\n\t*p = x\n\treturn p\n}\nfunc (x SearchParams_CursorType) String() string {\n\treturn proto.EnumName(SearchParams_CursorType_name, int32(x))\n}\nfunc (x *SearchParams_CursorType) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(SearchParams_CursorType_value, data, \"SearchParams_CursorType\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = SearchParams_CursorType(value)\n\treturn nil\n}\n\ntype SearchParams_ParsingMode int32\n\nconst (\n\tSearchParams_STRICT  SearchParams_ParsingMode = 0\n\tSearchParams_RELAXED SearchParams_ParsingMode = 1\n)\n\nvar SearchParams_ParsingMode_name = map[int32]string{\n\t0: \"STRICT\",\n\t1: \"RELAXED\",\n}\nvar SearchParams_ParsingMode_value = map[string]int32{\n\t\"STRICT\":  0,\n\t\"RELAXED\": 1,\n}\n\nfunc (x SearchParams_ParsingMode) Enum() *SearchParams_ParsingMode {\n\tp := new(SearchParams_ParsingMode)\n\t*p = x\n\treturn p\n}\nfunc (x SearchParams_ParsingMode) String() string {\n\treturn 
proto.EnumName(SearchParams_ParsingMode_name, int32(x))\n}\nfunc (x *SearchParams_ParsingMode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(SearchParams_ParsingMode_value, data, \"SearchParams_ParsingMode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = SearchParams_ParsingMode(value)\n\treturn nil\n}\n\ntype Scope struct {\n\tType             *Scope_Type `protobuf:\"varint,1,opt,name=type,enum=search.Scope_Type\" json:\"type,omitempty\"`\n\tValue            *string     `protobuf:\"bytes,2,opt,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte      `json:\"-\"`\n}\n\nfunc (m *Scope) Reset()         { *m = Scope{} }\nfunc (m *Scope) String() string { return proto.CompactTextString(m) }\nfunc (*Scope) ProtoMessage()    {}\n\nfunc (m *Scope) GetType() Scope_Type {\n\tif m != nil && m.Type != nil {\n\t\treturn *m.Type\n\t}\n\treturn Scope_USER_BY_CANONICAL_ID\n}\n\nfunc (m *Scope) GetValue() string {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn \"\"\n}\n\ntype Entry struct {\n\tScope            *Scope            `protobuf:\"bytes,1,opt,name=scope\" json:\"scope,omitempty\"`\n\tPermission       *Entry_Permission `protobuf:\"varint,2,opt,name=permission,enum=search.Entry_Permission\" json:\"permission,omitempty\"`\n\tDisplayName      *string           `protobuf:\"bytes,3,opt,name=display_name\" json:\"display_name,omitempty\"`\n\tXXX_unrecognized []byte            `json:\"-\"`\n}\n\nfunc (m *Entry) Reset()         { *m = Entry{} }\nfunc (m *Entry) String() string { return proto.CompactTextString(m) }\nfunc (*Entry) ProtoMessage()    {}\n\nfunc (m *Entry) GetScope() *Scope {\n\tif m != nil {\n\t\treturn m.Scope\n\t}\n\treturn nil\n}\n\nfunc (m *Entry) GetPermission() Entry_Permission {\n\tif m != nil && m.Permission != nil {\n\t\treturn *m.Permission\n\t}\n\treturn Entry_READ\n}\n\nfunc (m *Entry) GetDisplayName() string {\n\tif m != nil && m.DisplayName != nil {\n\t\treturn 
*m.DisplayName\n\t}\n\treturn \"\"\n}\n\ntype AccessControlList struct {\n\tOwner            *string  `protobuf:\"bytes,1,opt,name=owner\" json:\"owner,omitempty\"`\n\tEntries          []*Entry `protobuf:\"bytes,2,rep,name=entries\" json:\"entries,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *AccessControlList) Reset()         { *m = AccessControlList{} }\nfunc (m *AccessControlList) String() string { return proto.CompactTextString(m) }\nfunc (*AccessControlList) ProtoMessage()    {}\n\nfunc (m *AccessControlList) GetOwner() string {\n\tif m != nil && m.Owner != nil {\n\t\treturn *m.Owner\n\t}\n\treturn \"\"\n}\n\nfunc (m *AccessControlList) GetEntries() []*Entry {\n\tif m != nil {\n\t\treturn m.Entries\n\t}\n\treturn nil\n}\n\ntype FieldValue struct {\n\tType             *FieldValue_ContentType `protobuf:\"varint,1,opt,name=type,enum=search.FieldValue_ContentType,def=0\" json:\"type,omitempty\"`\n\tLanguage         *string                 `protobuf:\"bytes,2,opt,name=language,def=en\" json:\"language,omitempty\"`\n\tStringValue      *string                 `protobuf:\"bytes,3,opt,name=string_value\" json:\"string_value,omitempty\"`\n\tGeo              *FieldValue_Geo         `protobuf:\"group,4,opt,name=Geo\" json:\"geo,omitempty\"`\n\tXXX_unrecognized []byte                  `json:\"-\"`\n}\n\nfunc (m *FieldValue) Reset()         { *m = FieldValue{} }\nfunc (m *FieldValue) String() string { return proto.CompactTextString(m) }\nfunc (*FieldValue) ProtoMessage()    {}\n\nconst Default_FieldValue_Type FieldValue_ContentType = FieldValue_TEXT\nconst Default_FieldValue_Language string = \"en\"\n\nfunc (m *FieldValue) GetType() FieldValue_ContentType {\n\tif m != nil && m.Type != nil {\n\t\treturn *m.Type\n\t}\n\treturn Default_FieldValue_Type\n}\n\nfunc (m *FieldValue) GetLanguage() string {\n\tif m != nil && m.Language != nil {\n\t\treturn *m.Language\n\t}\n\treturn Default_FieldValue_Language\n}\n\nfunc (m *FieldValue) GetStringValue() string 
{\n\tif m != nil && m.StringValue != nil {\n\t\treturn *m.StringValue\n\t}\n\treturn \"\"\n}\n\nfunc (m *FieldValue) GetGeo() *FieldValue_Geo {\n\tif m != nil {\n\t\treturn m.Geo\n\t}\n\treturn nil\n}\n\ntype FieldValue_Geo struct {\n\tLat              *float64 `protobuf:\"fixed64,5,req,name=lat\" json:\"lat,omitempty\"`\n\tLng              *float64 `protobuf:\"fixed64,6,req,name=lng\" json:\"lng,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *FieldValue_Geo) Reset()         { *m = FieldValue_Geo{} }\nfunc (m *FieldValue_Geo) String() string { return proto.CompactTextString(m) }\nfunc (*FieldValue_Geo) ProtoMessage()    {}\n\nfunc (m *FieldValue_Geo) GetLat() float64 {\n\tif m != nil && m.Lat != nil {\n\t\treturn *m.Lat\n\t}\n\treturn 0\n}\n\nfunc (m *FieldValue_Geo) GetLng() float64 {\n\tif m != nil && m.Lng != nil {\n\t\treturn *m.Lng\n\t}\n\treturn 0\n}\n\ntype Field struct {\n\tName             *string     `protobuf:\"bytes,1,req,name=name\" json:\"name,omitempty\"`\n\tValue            *FieldValue `protobuf:\"bytes,2,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte      `json:\"-\"`\n}\n\nfunc (m *Field) Reset()         { *m = Field{} }\nfunc (m *Field) String() string { return proto.CompactTextString(m) }\nfunc (*Field) ProtoMessage()    {}\n\nfunc (m *Field) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *Field) GetValue() *FieldValue {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\ntype FieldTypes struct {\n\tName             *string                  `protobuf:\"bytes,1,req,name=name\" json:\"name,omitempty\"`\n\tType             []FieldValue_ContentType `protobuf:\"varint,2,rep,name=type,enum=search.FieldValue_ContentType\" json:\"type,omitempty\"`\n\tXXX_unrecognized []byte                   `json:\"-\"`\n}\n\nfunc (m *FieldTypes) Reset()         { *m = FieldTypes{} }\nfunc (m *FieldTypes) String() string { return proto.CompactTextString(m) 
}\nfunc (*FieldTypes) ProtoMessage()    {}\n\nfunc (m *FieldTypes) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *FieldTypes) GetType() []FieldValue_ContentType {\n\tif m != nil {\n\t\treturn m.Type\n\t}\n\treturn nil\n}\n\ntype IndexShardSettings struct {\n\tPrevNumShards            []int32 `protobuf:\"varint,1,rep,name=prev_num_shards\" json:\"prev_num_shards,omitempty\"`\n\tNumShards                *int32  `protobuf:\"varint,2,req,name=num_shards,def=1\" json:\"num_shards,omitempty\"`\n\tPrevNumShardsSearchFalse []int32 `protobuf:\"varint,3,rep,name=prev_num_shards_search_false\" json:\"prev_num_shards_search_false,omitempty\"`\n\tLocalReplica             *string `protobuf:\"bytes,4,opt,name=local_replica,def=\" json:\"local_replica,omitempty\"`\n\tXXX_unrecognized         []byte  `json:\"-\"`\n}\n\nfunc (m *IndexShardSettings) Reset()         { *m = IndexShardSettings{} }\nfunc (m *IndexShardSettings) String() string { return proto.CompactTextString(m) }\nfunc (*IndexShardSettings) ProtoMessage()    {}\n\nconst Default_IndexShardSettings_NumShards int32 = 1\n\nfunc (m *IndexShardSettings) GetPrevNumShards() []int32 {\n\tif m != nil {\n\t\treturn m.PrevNumShards\n\t}\n\treturn nil\n}\n\nfunc (m *IndexShardSettings) GetNumShards() int32 {\n\tif m != nil && m.NumShards != nil {\n\t\treturn *m.NumShards\n\t}\n\treturn Default_IndexShardSettings_NumShards\n}\n\nfunc (m *IndexShardSettings) GetPrevNumShardsSearchFalse() []int32 {\n\tif m != nil {\n\t\treturn m.PrevNumShardsSearchFalse\n\t}\n\treturn nil\n}\n\nfunc (m *IndexShardSettings) GetLocalReplica() string {\n\tif m != nil && m.LocalReplica != nil {\n\t\treturn *m.LocalReplica\n\t}\n\treturn \"\"\n}\n\ntype FacetValue struct {\n\tType             *FacetValue_ContentType `protobuf:\"varint,1,opt,name=type,enum=search.FacetValue_ContentType,def=2\" json:\"type,omitempty\"`\n\tStringValue      *string                 
`protobuf:\"bytes,3,opt,name=string_value\" json:\"string_value,omitempty\"`\n\tXXX_unrecognized []byte                  `json:\"-\"`\n}\n\nfunc (m *FacetValue) Reset()         { *m = FacetValue{} }\nfunc (m *FacetValue) String() string { return proto.CompactTextString(m) }\nfunc (*FacetValue) ProtoMessage()    {}\n\nconst Default_FacetValue_Type FacetValue_ContentType = FacetValue_ATOM\n\nfunc (m *FacetValue) GetType() FacetValue_ContentType {\n\tif m != nil && m.Type != nil {\n\t\treturn *m.Type\n\t}\n\treturn Default_FacetValue_Type\n}\n\nfunc (m *FacetValue) GetStringValue() string {\n\tif m != nil && m.StringValue != nil {\n\t\treturn *m.StringValue\n\t}\n\treturn \"\"\n}\n\ntype Facet struct {\n\tName             *string     `protobuf:\"bytes,1,req,name=name\" json:\"name,omitempty\"`\n\tValue            *FacetValue `protobuf:\"bytes,2,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte      `json:\"-\"`\n}\n\nfunc (m *Facet) Reset()         { *m = Facet{} }\nfunc (m *Facet) String() string { return proto.CompactTextString(m) }\nfunc (*Facet) ProtoMessage()    {}\n\nfunc (m *Facet) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *Facet) GetValue() *FacetValue {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\ntype DocumentMetadata struct {\n\tVersion            *int64 `protobuf:\"varint,1,opt,name=version\" json:\"version,omitempty\"`\n\tCommittedStVersion *int64 `protobuf:\"varint,2,opt,name=committed_st_version\" json:\"committed_st_version,omitempty\"`\n\tXXX_unrecognized   []byte `json:\"-\"`\n}\n\nfunc (m *DocumentMetadata) Reset()         { *m = DocumentMetadata{} }\nfunc (m *DocumentMetadata) String() string { return proto.CompactTextString(m) }\nfunc (*DocumentMetadata) ProtoMessage()    {}\n\nfunc (m *DocumentMetadata) GetVersion() int64 {\n\tif m != nil && m.Version != nil {\n\t\treturn *m.Version\n\t}\n\treturn 0\n}\n\nfunc (m *DocumentMetadata) 
GetCommittedStVersion() int64 {\n\tif m != nil && m.CommittedStVersion != nil {\n\t\treturn *m.CommittedStVersion\n\t}\n\treturn 0\n}\n\ntype Document struct {\n\tId               *string           `protobuf:\"bytes,1,opt,name=id\" json:\"id,omitempty\"`\n\tLanguage         *string           `protobuf:\"bytes,2,opt,name=language,def=en\" json:\"language,omitempty\"`\n\tField            []*Field          `protobuf:\"bytes,3,rep,name=field\" json:\"field,omitempty\"`\n\tOrderId          *int32            `protobuf:\"varint,4,opt,name=order_id\" json:\"order_id,omitempty\"`\n\tStorage          *Document_Storage `protobuf:\"varint,5,opt,name=storage,enum=search.Document_Storage,def=0\" json:\"storage,omitempty\"`\n\tFacet            []*Facet          `protobuf:\"bytes,8,rep,name=facet\" json:\"facet,omitempty\"`\n\tXXX_unrecognized []byte            `json:\"-\"`\n}\n\nfunc (m *Document) Reset()         { *m = Document{} }\nfunc (m *Document) String() string { return proto.CompactTextString(m) }\nfunc (*Document) ProtoMessage()    {}\n\nconst Default_Document_Language string = \"en\"\nconst Default_Document_Storage Document_Storage = Document_DISK\n\nfunc (m *Document) GetId() string {\n\tif m != nil && m.Id != nil {\n\t\treturn *m.Id\n\t}\n\treturn \"\"\n}\n\nfunc (m *Document) GetLanguage() string {\n\tif m != nil && m.Language != nil {\n\t\treturn *m.Language\n\t}\n\treturn Default_Document_Language\n}\n\nfunc (m *Document) GetField() []*Field {\n\tif m != nil {\n\t\treturn m.Field\n\t}\n\treturn nil\n}\n\nfunc (m *Document) GetOrderId() int32 {\n\tif m != nil && m.OrderId != nil {\n\t\treturn *m.OrderId\n\t}\n\treturn 0\n}\n\nfunc (m *Document) GetStorage() Document_Storage {\n\tif m != nil && m.Storage != nil {\n\t\treturn *m.Storage\n\t}\n\treturn Default_Document_Storage\n}\n\nfunc (m *Document) GetFacet() []*Facet {\n\tif m != nil {\n\t\treturn m.Facet\n\t}\n\treturn nil\n}\n\ntype SearchServiceError struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m 
*SearchServiceError) Reset()         { *m = SearchServiceError{} }\nfunc (m *SearchServiceError) String() string { return proto.CompactTextString(m) }\nfunc (*SearchServiceError) ProtoMessage()    {}\n\ntype RequestStatus struct {\n\tCode             *SearchServiceError_ErrorCode `protobuf:\"varint,1,req,name=code,enum=search.SearchServiceError_ErrorCode\" json:\"code,omitempty\"`\n\tErrorDetail      *string                       `protobuf:\"bytes,2,opt,name=error_detail\" json:\"error_detail,omitempty\"`\n\tCanonicalCode    *int32                        `protobuf:\"varint,3,opt,name=canonical_code\" json:\"canonical_code,omitempty\"`\n\tXXX_unrecognized []byte                        `json:\"-\"`\n}\n\nfunc (m *RequestStatus) Reset()         { *m = RequestStatus{} }\nfunc (m *RequestStatus) String() string { return proto.CompactTextString(m) }\nfunc (*RequestStatus) ProtoMessage()    {}\n\nfunc (m *RequestStatus) GetCode() SearchServiceError_ErrorCode {\n\tif m != nil && m.Code != nil {\n\t\treturn *m.Code\n\t}\n\treturn SearchServiceError_OK\n}\n\nfunc (m *RequestStatus) GetErrorDetail() string {\n\tif m != nil && m.ErrorDetail != nil {\n\t\treturn *m.ErrorDetail\n\t}\n\treturn \"\"\n}\n\nfunc (m *RequestStatus) GetCanonicalCode() int32 {\n\tif m != nil && m.CanonicalCode != nil {\n\t\treturn *m.CanonicalCode\n\t}\n\treturn 0\n}\n\ntype IndexSpec struct {\n\tName             *string                `protobuf:\"bytes,1,req,name=name\" json:\"name,omitempty\"`\n\tConsistency      *IndexSpec_Consistency `protobuf:\"varint,2,opt,name=consistency,enum=search.IndexSpec_Consistency,def=1\" json:\"consistency,omitempty\"`\n\tNamespace        *string                `protobuf:\"bytes,3,opt,name=namespace\" json:\"namespace,omitempty\"`\n\tVersion          *int32                 `protobuf:\"varint,4,opt,name=version\" json:\"version,omitempty\"`\n\tSource           *IndexSpec_Source      `protobuf:\"varint,5,opt,name=source,enum=search.IndexSpec_Source,def=0\" 
json:\"source,omitempty\"`\n\tMode             *IndexSpec_Mode        `protobuf:\"varint,6,opt,name=mode,enum=search.IndexSpec_Mode,def=0\" json:\"mode,omitempty\"`\n\tXXX_unrecognized []byte                 `json:\"-\"`\n}\n\nfunc (m *IndexSpec) Reset()         { *m = IndexSpec{} }\nfunc (m *IndexSpec) String() string { return proto.CompactTextString(m) }\nfunc (*IndexSpec) ProtoMessage()    {}\n\nconst Default_IndexSpec_Consistency IndexSpec_Consistency = IndexSpec_PER_DOCUMENT\nconst Default_IndexSpec_Source IndexSpec_Source = IndexSpec_SEARCH\nconst Default_IndexSpec_Mode IndexSpec_Mode = IndexSpec_PRIORITY\n\nfunc (m *IndexSpec) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *IndexSpec) GetConsistency() IndexSpec_Consistency {\n\tif m != nil && m.Consistency != nil {\n\t\treturn *m.Consistency\n\t}\n\treturn Default_IndexSpec_Consistency\n}\n\nfunc (m *IndexSpec) GetNamespace() string {\n\tif m != nil && m.Namespace != nil {\n\t\treturn *m.Namespace\n\t}\n\treturn \"\"\n}\n\nfunc (m *IndexSpec) GetVersion() int32 {\n\tif m != nil && m.Version != nil {\n\t\treturn *m.Version\n\t}\n\treturn 0\n}\n\nfunc (m *IndexSpec) GetSource() IndexSpec_Source {\n\tif m != nil && m.Source != nil {\n\t\treturn *m.Source\n\t}\n\treturn Default_IndexSpec_Source\n}\n\nfunc (m *IndexSpec) GetMode() IndexSpec_Mode {\n\tif m != nil && m.Mode != nil {\n\t\treturn *m.Mode\n\t}\n\treturn Default_IndexSpec_Mode\n}\n\ntype IndexMetadata struct {\n\tIndexSpec        *IndexSpec             `protobuf:\"bytes,1,req,name=index_spec\" json:\"index_spec,omitempty\"`\n\tField            []*FieldTypes          `protobuf:\"bytes,2,rep,name=field\" json:\"field,omitempty\"`\n\tStorage          *IndexMetadata_Storage `protobuf:\"bytes,3,opt,name=storage\" json:\"storage,omitempty\"`\n\tXXX_unrecognized []byte                 `json:\"-\"`\n}\n\nfunc (m *IndexMetadata) Reset()         { *m = IndexMetadata{} }\nfunc (m *IndexMetadata) String() 
string { return proto.CompactTextString(m) }\nfunc (*IndexMetadata) ProtoMessage()    {}\n\nfunc (m *IndexMetadata) GetIndexSpec() *IndexSpec {\n\tif m != nil {\n\t\treturn m.IndexSpec\n\t}\n\treturn nil\n}\n\nfunc (m *IndexMetadata) GetField() []*FieldTypes {\n\tif m != nil {\n\t\treturn m.Field\n\t}\n\treturn nil\n}\n\nfunc (m *IndexMetadata) GetStorage() *IndexMetadata_Storage {\n\tif m != nil {\n\t\treturn m.Storage\n\t}\n\treturn nil\n}\n\ntype IndexMetadata_Storage struct {\n\tAmountUsed       *int64 `protobuf:\"varint,1,opt,name=amount_used\" json:\"amount_used,omitempty\"`\n\tLimit            *int64 `protobuf:\"varint,2,opt,name=limit\" json:\"limit,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *IndexMetadata_Storage) Reset()         { *m = IndexMetadata_Storage{} }\nfunc (m *IndexMetadata_Storage) String() string { return proto.CompactTextString(m) }\nfunc (*IndexMetadata_Storage) ProtoMessage()    {}\n\nfunc (m *IndexMetadata_Storage) GetAmountUsed() int64 {\n\tif m != nil && m.AmountUsed != nil {\n\t\treturn *m.AmountUsed\n\t}\n\treturn 0\n}\n\nfunc (m *IndexMetadata_Storage) GetLimit() int64 {\n\tif m != nil && m.Limit != nil {\n\t\treturn *m.Limit\n\t}\n\treturn 0\n}\n\ntype IndexDocumentParams struct {\n\tDocument         []*Document                    `protobuf:\"bytes,1,rep,name=document\" json:\"document,omitempty\"`\n\tFreshness        *IndexDocumentParams_Freshness `protobuf:\"varint,2,opt,name=freshness,enum=search.IndexDocumentParams_Freshness,def=0\" json:\"freshness,omitempty\"`\n\tIndexSpec        *IndexSpec                     `protobuf:\"bytes,3,req,name=index_spec\" json:\"index_spec,omitempty\"`\n\tXXX_unrecognized []byte                         `json:\"-\"`\n}\n\nfunc (m *IndexDocumentParams) Reset()         { *m = IndexDocumentParams{} }\nfunc (m *IndexDocumentParams) String() string { return proto.CompactTextString(m) }\nfunc (*IndexDocumentParams) ProtoMessage()    {}\n\nconst 
Default_IndexDocumentParams_Freshness IndexDocumentParams_Freshness = IndexDocumentParams_SYNCHRONOUSLY\n\nfunc (m *IndexDocumentParams) GetDocument() []*Document {\n\tif m != nil {\n\t\treturn m.Document\n\t}\n\treturn nil\n}\n\nfunc (m *IndexDocumentParams) GetFreshness() IndexDocumentParams_Freshness {\n\tif m != nil && m.Freshness != nil {\n\t\treturn *m.Freshness\n\t}\n\treturn Default_IndexDocumentParams_Freshness\n}\n\nfunc (m *IndexDocumentParams) GetIndexSpec() *IndexSpec {\n\tif m != nil {\n\t\treturn m.IndexSpec\n\t}\n\treturn nil\n}\n\ntype IndexDocumentRequest struct {\n\tParams           *IndexDocumentParams `protobuf:\"bytes,1,req,name=params\" json:\"params,omitempty\"`\n\tAppId            []byte               `protobuf:\"bytes,3,opt,name=app_id\" json:\"app_id,omitempty\"`\n\tXXX_unrecognized []byte               `json:\"-\"`\n}\n\nfunc (m *IndexDocumentRequest) Reset()         { *m = IndexDocumentRequest{} }\nfunc (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) }\nfunc (*IndexDocumentRequest) ProtoMessage()    {}\n\nfunc (m *IndexDocumentRequest) GetParams() *IndexDocumentParams {\n\tif m != nil {\n\t\treturn m.Params\n\t}\n\treturn nil\n}\n\nfunc (m *IndexDocumentRequest) GetAppId() []byte {\n\tif m != nil {\n\t\treturn m.AppId\n\t}\n\treturn nil\n}\n\ntype IndexDocumentResponse struct {\n\tStatus           []*RequestStatus `protobuf:\"bytes,1,rep,name=status\" json:\"status,omitempty\"`\n\tDocId            []string         `protobuf:\"bytes,2,rep,name=doc_id\" json:\"doc_id,omitempty\"`\n\tXXX_unrecognized []byte           `json:\"-\"`\n}\n\nfunc (m *IndexDocumentResponse) Reset()         { *m = IndexDocumentResponse{} }\nfunc (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) }\nfunc (*IndexDocumentResponse) ProtoMessage()    {}\n\nfunc (m *IndexDocumentResponse) GetStatus() []*RequestStatus {\n\tif m != nil {\n\t\treturn m.Status\n\t}\n\treturn nil\n}\n\nfunc (m *IndexDocumentResponse) 
GetDocId() []string {\n\tif m != nil {\n\t\treturn m.DocId\n\t}\n\treturn nil\n}\n\ntype DeleteDocumentParams struct {\n\tDocId            []string   `protobuf:\"bytes,1,rep,name=doc_id\" json:\"doc_id,omitempty\"`\n\tIndexSpec        *IndexSpec `protobuf:\"bytes,2,req,name=index_spec\" json:\"index_spec,omitempty\"`\n\tXXX_unrecognized []byte     `json:\"-\"`\n}\n\nfunc (m *DeleteDocumentParams) Reset()         { *m = DeleteDocumentParams{} }\nfunc (m *DeleteDocumentParams) String() string { return proto.CompactTextString(m) }\nfunc (*DeleteDocumentParams) ProtoMessage()    {}\n\nfunc (m *DeleteDocumentParams) GetDocId() []string {\n\tif m != nil {\n\t\treturn m.DocId\n\t}\n\treturn nil\n}\n\nfunc (m *DeleteDocumentParams) GetIndexSpec() *IndexSpec {\n\tif m != nil {\n\t\treturn m.IndexSpec\n\t}\n\treturn nil\n}\n\ntype DeleteDocumentRequest struct {\n\tParams           *DeleteDocumentParams `protobuf:\"bytes,1,req,name=params\" json:\"params,omitempty\"`\n\tAppId            []byte                `protobuf:\"bytes,3,opt,name=app_id\" json:\"app_id,omitempty\"`\n\tXXX_unrecognized []byte                `json:\"-\"`\n}\n\nfunc (m *DeleteDocumentRequest) Reset()         { *m = DeleteDocumentRequest{} }\nfunc (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) }\nfunc (*DeleteDocumentRequest) ProtoMessage()    {}\n\nfunc (m *DeleteDocumentRequest) GetParams() *DeleteDocumentParams {\n\tif m != nil {\n\t\treturn m.Params\n\t}\n\treturn nil\n}\n\nfunc (m *DeleteDocumentRequest) GetAppId() []byte {\n\tif m != nil {\n\t\treturn m.AppId\n\t}\n\treturn nil\n}\n\ntype DeleteDocumentResponse struct {\n\tStatus           []*RequestStatus `protobuf:\"bytes,1,rep,name=status\" json:\"status,omitempty\"`\n\tXXX_unrecognized []byte           `json:\"-\"`\n}\n\nfunc (m *DeleteDocumentResponse) Reset()         { *m = DeleteDocumentResponse{} }\nfunc (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) }\nfunc 
(*DeleteDocumentResponse) ProtoMessage()    {}\n\nfunc (m *DeleteDocumentResponse) GetStatus() []*RequestStatus {\n\tif m != nil {\n\t\treturn m.Status\n\t}\n\treturn nil\n}\n\ntype ListDocumentsParams struct {\n\tIndexSpec        *IndexSpec `protobuf:\"bytes,1,req,name=index_spec\" json:\"index_spec,omitempty\"`\n\tStartDocId       *string    `protobuf:\"bytes,2,opt,name=start_doc_id\" json:\"start_doc_id,omitempty\"`\n\tIncludeStartDoc  *bool      `protobuf:\"varint,3,opt,name=include_start_doc,def=1\" json:\"include_start_doc,omitempty\"`\n\tLimit            *int32     `protobuf:\"varint,4,opt,name=limit,def=100\" json:\"limit,omitempty\"`\n\tKeysOnly         *bool      `protobuf:\"varint,5,opt,name=keys_only\" json:\"keys_only,omitempty\"`\n\tXXX_unrecognized []byte     `json:\"-\"`\n}\n\nfunc (m *ListDocumentsParams) Reset()         { *m = ListDocumentsParams{} }\nfunc (m *ListDocumentsParams) String() string { return proto.CompactTextString(m) }\nfunc (*ListDocumentsParams) ProtoMessage()    {}\n\nconst Default_ListDocumentsParams_IncludeStartDoc bool = true\nconst Default_ListDocumentsParams_Limit int32 = 100\n\nfunc (m *ListDocumentsParams) GetIndexSpec() *IndexSpec {\n\tif m != nil {\n\t\treturn m.IndexSpec\n\t}\n\treturn nil\n}\n\nfunc (m *ListDocumentsParams) GetStartDocId() string {\n\tif m != nil && m.StartDocId != nil {\n\t\treturn *m.StartDocId\n\t}\n\treturn \"\"\n}\n\nfunc (m *ListDocumentsParams) GetIncludeStartDoc() bool {\n\tif m != nil && m.IncludeStartDoc != nil {\n\t\treturn *m.IncludeStartDoc\n\t}\n\treturn Default_ListDocumentsParams_IncludeStartDoc\n}\n\nfunc (m *ListDocumentsParams) GetLimit() int32 {\n\tif m != nil && m.Limit != nil {\n\t\treturn *m.Limit\n\t}\n\treturn Default_ListDocumentsParams_Limit\n}\n\nfunc (m *ListDocumentsParams) GetKeysOnly() bool {\n\tif m != nil && m.KeysOnly != nil {\n\t\treturn *m.KeysOnly\n\t}\n\treturn false\n}\n\ntype ListDocumentsRequest struct {\n\tParams           *ListDocumentsParams 
`protobuf:\"bytes,1,req,name=params\" json:\"params,omitempty\"`\n\tAppId            []byte               `protobuf:\"bytes,2,opt,name=app_id\" json:\"app_id,omitempty\"`\n\tXXX_unrecognized []byte               `json:\"-\"`\n}\n\nfunc (m *ListDocumentsRequest) Reset()         { *m = ListDocumentsRequest{} }\nfunc (m *ListDocumentsRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ListDocumentsRequest) ProtoMessage()    {}\n\nfunc (m *ListDocumentsRequest) GetParams() *ListDocumentsParams {\n\tif m != nil {\n\t\treturn m.Params\n\t}\n\treturn nil\n}\n\nfunc (m *ListDocumentsRequest) GetAppId() []byte {\n\tif m != nil {\n\t\treturn m.AppId\n\t}\n\treturn nil\n}\n\ntype ListDocumentsResponse struct {\n\tStatus           *RequestStatus `protobuf:\"bytes,1,req,name=status\" json:\"status,omitempty\"`\n\tDocument         []*Document    `protobuf:\"bytes,2,rep,name=document\" json:\"document,omitempty\"`\n\tXXX_unrecognized []byte         `json:\"-\"`\n}\n\nfunc (m *ListDocumentsResponse) Reset()         { *m = ListDocumentsResponse{} }\nfunc (m *ListDocumentsResponse) String() string { return proto.CompactTextString(m) }\nfunc (*ListDocumentsResponse) ProtoMessage()    {}\n\nfunc (m *ListDocumentsResponse) GetStatus() *RequestStatus {\n\tif m != nil {\n\t\treturn m.Status\n\t}\n\treturn nil\n}\n\nfunc (m *ListDocumentsResponse) GetDocument() []*Document {\n\tif m != nil {\n\t\treturn m.Document\n\t}\n\treturn nil\n}\n\ntype ListIndexesParams struct {\n\tFetchSchema       *bool             `protobuf:\"varint,1,opt,name=fetch_schema\" json:\"fetch_schema,omitempty\"`\n\tLimit             *int32            `protobuf:\"varint,2,opt,name=limit,def=20\" json:\"limit,omitempty\"`\n\tNamespace         *string           `protobuf:\"bytes,3,opt,name=namespace\" json:\"namespace,omitempty\"`\n\tStartIndexName    *string           `protobuf:\"bytes,4,opt,name=start_index_name\" json:\"start_index_name,omitempty\"`\n\tIncludeStartIndex *bool             
`protobuf:\"varint,5,opt,name=include_start_index,def=1\" json:\"include_start_index,omitempty\"`\n\tIndexNamePrefix   *string           `protobuf:\"bytes,6,opt,name=index_name_prefix\" json:\"index_name_prefix,omitempty\"`\n\tOffset            *int32            `protobuf:\"varint,7,opt,name=offset\" json:\"offset,omitempty\"`\n\tSource            *IndexSpec_Source `protobuf:\"varint,8,opt,name=source,enum=search.IndexSpec_Source,def=0\" json:\"source,omitempty\"`\n\tXXX_unrecognized  []byte            `json:\"-\"`\n}\n\nfunc (m *ListIndexesParams) Reset()         { *m = ListIndexesParams{} }\nfunc (m *ListIndexesParams) String() string { return proto.CompactTextString(m) }\nfunc (*ListIndexesParams) ProtoMessage()    {}\n\nconst Default_ListIndexesParams_Limit int32 = 20\nconst Default_ListIndexesParams_IncludeStartIndex bool = true\nconst Default_ListIndexesParams_Source IndexSpec_Source = IndexSpec_SEARCH\n\nfunc (m *ListIndexesParams) GetFetchSchema() bool {\n\tif m != nil && m.FetchSchema != nil {\n\t\treturn *m.FetchSchema\n\t}\n\treturn false\n}\n\nfunc (m *ListIndexesParams) GetLimit() int32 {\n\tif m != nil && m.Limit != nil {\n\t\treturn *m.Limit\n\t}\n\treturn Default_ListIndexesParams_Limit\n}\n\nfunc (m *ListIndexesParams) GetNamespace() string {\n\tif m != nil && m.Namespace != nil {\n\t\treturn *m.Namespace\n\t}\n\treturn \"\"\n}\n\nfunc (m *ListIndexesParams) GetStartIndexName() string {\n\tif m != nil && m.StartIndexName != nil {\n\t\treturn *m.StartIndexName\n\t}\n\treturn \"\"\n}\n\nfunc (m *ListIndexesParams) GetIncludeStartIndex() bool {\n\tif m != nil && m.IncludeStartIndex != nil {\n\t\treturn *m.IncludeStartIndex\n\t}\n\treturn Default_ListIndexesParams_IncludeStartIndex\n}\n\nfunc (m *ListIndexesParams) GetIndexNamePrefix() string {\n\tif m != nil && m.IndexNamePrefix != nil {\n\t\treturn *m.IndexNamePrefix\n\t}\n\treturn \"\"\n}\n\nfunc (m *ListIndexesParams) GetOffset() int32 {\n\tif m != nil && m.Offset != nil {\n\t\treturn 
*m.Offset\n\t}\n\treturn 0\n}\n\nfunc (m *ListIndexesParams) GetSource() IndexSpec_Source {\n\tif m != nil && m.Source != nil {\n\t\treturn *m.Source\n\t}\n\treturn Default_ListIndexesParams_Source\n}\n\ntype ListIndexesRequest struct {\n\tParams           *ListIndexesParams `protobuf:\"bytes,1,req,name=params\" json:\"params,omitempty\"`\n\tAppId            []byte             `protobuf:\"bytes,3,opt,name=app_id\" json:\"app_id,omitempty\"`\n\tXXX_unrecognized []byte             `json:\"-\"`\n}\n\nfunc (m *ListIndexesRequest) Reset()         { *m = ListIndexesRequest{} }\nfunc (m *ListIndexesRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ListIndexesRequest) ProtoMessage()    {}\n\nfunc (m *ListIndexesRequest) GetParams() *ListIndexesParams {\n\tif m != nil {\n\t\treturn m.Params\n\t}\n\treturn nil\n}\n\nfunc (m *ListIndexesRequest) GetAppId() []byte {\n\tif m != nil {\n\t\treturn m.AppId\n\t}\n\treturn nil\n}\n\ntype ListIndexesResponse struct {\n\tStatus           *RequestStatus   `protobuf:\"bytes,1,req,name=status\" json:\"status,omitempty\"`\n\tIndexMetadata    []*IndexMetadata `protobuf:\"bytes,2,rep,name=index_metadata\" json:\"index_metadata,omitempty\"`\n\tXXX_unrecognized []byte           `json:\"-\"`\n}\n\nfunc (m *ListIndexesResponse) Reset()         { *m = ListIndexesResponse{} }\nfunc (m *ListIndexesResponse) String() string { return proto.CompactTextString(m) }\nfunc (*ListIndexesResponse) ProtoMessage()    {}\n\nfunc (m *ListIndexesResponse) GetStatus() *RequestStatus {\n\tif m != nil {\n\t\treturn m.Status\n\t}\n\treturn nil\n}\n\nfunc (m *ListIndexesResponse) GetIndexMetadata() []*IndexMetadata {\n\tif m != nil {\n\t\treturn m.IndexMetadata\n\t}\n\treturn nil\n}\n\ntype DeleteSchemaParams struct {\n\tSource           *IndexSpec_Source `protobuf:\"varint,1,opt,name=source,enum=search.IndexSpec_Source,def=0\" json:\"source,omitempty\"`\n\tIndexSpec        []*IndexSpec      `protobuf:\"bytes,2,rep,name=index_spec\" 
json:\"index_spec,omitempty\"`\n\tXXX_unrecognized []byte            `json:\"-\"`\n}\n\nfunc (m *DeleteSchemaParams) Reset()         { *m = DeleteSchemaParams{} }\nfunc (m *DeleteSchemaParams) String() string { return proto.CompactTextString(m) }\nfunc (*DeleteSchemaParams) ProtoMessage()    {}\n\nconst Default_DeleteSchemaParams_Source IndexSpec_Source = IndexSpec_SEARCH\n\nfunc (m *DeleteSchemaParams) GetSource() IndexSpec_Source {\n\tif m != nil && m.Source != nil {\n\t\treturn *m.Source\n\t}\n\treturn Default_DeleteSchemaParams_Source\n}\n\nfunc (m *DeleteSchemaParams) GetIndexSpec() []*IndexSpec {\n\tif m != nil {\n\t\treturn m.IndexSpec\n\t}\n\treturn nil\n}\n\ntype DeleteSchemaRequest struct {\n\tParams           *DeleteSchemaParams `protobuf:\"bytes,1,req,name=params\" json:\"params,omitempty\"`\n\tAppId            []byte              `protobuf:\"bytes,3,opt,name=app_id\" json:\"app_id,omitempty\"`\n\tXXX_unrecognized []byte              `json:\"-\"`\n}\n\nfunc (m *DeleteSchemaRequest) Reset()         { *m = DeleteSchemaRequest{} }\nfunc (m *DeleteSchemaRequest) String() string { return proto.CompactTextString(m) }\nfunc (*DeleteSchemaRequest) ProtoMessage()    {}\n\nfunc (m *DeleteSchemaRequest) GetParams() *DeleteSchemaParams {\n\tif m != nil {\n\t\treturn m.Params\n\t}\n\treturn nil\n}\n\nfunc (m *DeleteSchemaRequest) GetAppId() []byte {\n\tif m != nil {\n\t\treturn m.AppId\n\t}\n\treturn nil\n}\n\ntype DeleteSchemaResponse struct {\n\tStatus           []*RequestStatus `protobuf:\"bytes,1,rep,name=status\" json:\"status,omitempty\"`\n\tXXX_unrecognized []byte           `json:\"-\"`\n}\n\nfunc (m *DeleteSchemaResponse) Reset()         { *m = DeleteSchemaResponse{} }\nfunc (m *DeleteSchemaResponse) String() string { return proto.CompactTextString(m) }\nfunc (*DeleteSchemaResponse) ProtoMessage()    {}\n\nfunc (m *DeleteSchemaResponse) GetStatus() []*RequestStatus {\n\tif m != nil {\n\t\treturn m.Status\n\t}\n\treturn nil\n}\n\ntype SortSpec struct 
{\n\tSortExpression      *string  `protobuf:\"bytes,1,req,name=sort_expression\" json:\"sort_expression,omitempty\"`\n\tSortDescending      *bool    `protobuf:\"varint,2,opt,name=sort_descending,def=1\" json:\"sort_descending,omitempty\"`\n\tDefaultValueText    *string  `protobuf:\"bytes,4,opt,name=default_value_text\" json:\"default_value_text,omitempty\"`\n\tDefaultValueNumeric *float64 `protobuf:\"fixed64,5,opt,name=default_value_numeric\" json:\"default_value_numeric,omitempty\"`\n\tXXX_unrecognized    []byte   `json:\"-\"`\n}\n\nfunc (m *SortSpec) Reset()         { *m = SortSpec{} }\nfunc (m *SortSpec) String() string { return proto.CompactTextString(m) }\nfunc (*SortSpec) ProtoMessage()    {}\n\nconst Default_SortSpec_SortDescending bool = true\n\nfunc (m *SortSpec) GetSortExpression() string {\n\tif m != nil && m.SortExpression != nil {\n\t\treturn *m.SortExpression\n\t}\n\treturn \"\"\n}\n\nfunc (m *SortSpec) GetSortDescending() bool {\n\tif m != nil && m.SortDescending != nil {\n\t\treturn *m.SortDescending\n\t}\n\treturn Default_SortSpec_SortDescending\n}\n\nfunc (m *SortSpec) GetDefaultValueText() string {\n\tif m != nil && m.DefaultValueText != nil {\n\t\treturn *m.DefaultValueText\n\t}\n\treturn \"\"\n}\n\nfunc (m *SortSpec) GetDefaultValueNumeric() float64 {\n\tif m != nil && m.DefaultValueNumeric != nil {\n\t\treturn *m.DefaultValueNumeric\n\t}\n\treturn 0\n}\n\ntype ScorerSpec struct {\n\tScorer                *ScorerSpec_Scorer `protobuf:\"varint,1,opt,name=scorer,enum=search.ScorerSpec_Scorer,def=2\" json:\"scorer,omitempty\"`\n\tLimit                 *int32             `protobuf:\"varint,2,opt,name=limit,def=1000\" json:\"limit,omitempty\"`\n\tMatchScorerParameters *string            `protobuf:\"bytes,9,opt,name=match_scorer_parameters\" json:\"match_scorer_parameters,omitempty\"`\n\tXXX_unrecognized      []byte             `json:\"-\"`\n}\n\nfunc (m *ScorerSpec) Reset()         { *m = ScorerSpec{} }\nfunc (m *ScorerSpec) String() string { return 
proto.CompactTextString(m) }\nfunc (*ScorerSpec) ProtoMessage()    {}\n\nconst Default_ScorerSpec_Scorer ScorerSpec_Scorer = ScorerSpec_MATCH_SCORER\nconst Default_ScorerSpec_Limit int32 = 1000\n\nfunc (m *ScorerSpec) GetScorer() ScorerSpec_Scorer {\n\tif m != nil && m.Scorer != nil {\n\t\treturn *m.Scorer\n\t}\n\treturn Default_ScorerSpec_Scorer\n}\n\nfunc (m *ScorerSpec) GetLimit() int32 {\n\tif m != nil && m.Limit != nil {\n\t\treturn *m.Limit\n\t}\n\treturn Default_ScorerSpec_Limit\n}\n\nfunc (m *ScorerSpec) GetMatchScorerParameters() string {\n\tif m != nil && m.MatchScorerParameters != nil {\n\t\treturn *m.MatchScorerParameters\n\t}\n\treturn \"\"\n}\n\ntype FieldSpec struct {\n\tName             []string                `protobuf:\"bytes,1,rep,name=name\" json:\"name,omitempty\"`\n\tExpression       []*FieldSpec_Expression `protobuf:\"group,2,rep,name=Expression\" json:\"expression,omitempty\"`\n\tXXX_unrecognized []byte                  `json:\"-\"`\n}\n\nfunc (m *FieldSpec) Reset()         { *m = FieldSpec{} }\nfunc (m *FieldSpec) String() string { return proto.CompactTextString(m) }\nfunc (*FieldSpec) ProtoMessage()    {}\n\nfunc (m *FieldSpec) GetName() []string {\n\tif m != nil {\n\t\treturn m.Name\n\t}\n\treturn nil\n}\n\nfunc (m *FieldSpec) GetExpression() []*FieldSpec_Expression {\n\tif m != nil {\n\t\treturn m.Expression\n\t}\n\treturn nil\n}\n\ntype FieldSpec_Expression struct {\n\tName             *string `protobuf:\"bytes,3,req,name=name\" json:\"name,omitempty\"`\n\tExpression       *string `protobuf:\"bytes,4,req,name=expression\" json:\"expression,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *FieldSpec_Expression) Reset()         { *m = FieldSpec_Expression{} }\nfunc (m *FieldSpec_Expression) String() string { return proto.CompactTextString(m) }\nfunc (*FieldSpec_Expression) ProtoMessage()    {}\n\nfunc (m *FieldSpec_Expression) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn 
\"\"\n}\n\nfunc (m *FieldSpec_Expression) GetExpression() string {\n\tif m != nil && m.Expression != nil {\n\t\treturn *m.Expression\n\t}\n\treturn \"\"\n}\n\ntype FacetRange struct {\n\tName             *string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\tStart            *string `protobuf:\"bytes,2,opt,name=start\" json:\"start,omitempty\"`\n\tEnd              *string `protobuf:\"bytes,3,opt,name=end\" json:\"end,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *FacetRange) Reset()         { *m = FacetRange{} }\nfunc (m *FacetRange) String() string { return proto.CompactTextString(m) }\nfunc (*FacetRange) ProtoMessage()    {}\n\nfunc (m *FacetRange) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *FacetRange) GetStart() string {\n\tif m != nil && m.Start != nil {\n\t\treturn *m.Start\n\t}\n\treturn \"\"\n}\n\nfunc (m *FacetRange) GetEnd() string {\n\tif m != nil && m.End != nil {\n\t\treturn *m.End\n\t}\n\treturn \"\"\n}\n\ntype FacetRequestParam struct {\n\tValueLimit       *int32        `protobuf:\"varint,1,opt,name=value_limit\" json:\"value_limit,omitempty\"`\n\tRange            []*FacetRange `protobuf:\"bytes,2,rep,name=range\" json:\"range,omitempty\"`\n\tValueConstraint  []string      `protobuf:\"bytes,3,rep,name=value_constraint\" json:\"value_constraint,omitempty\"`\n\tXXX_unrecognized []byte        `json:\"-\"`\n}\n\nfunc (m *FacetRequestParam) Reset()         { *m = FacetRequestParam{} }\nfunc (m *FacetRequestParam) String() string { return proto.CompactTextString(m) }\nfunc (*FacetRequestParam) ProtoMessage()    {}\n\nfunc (m *FacetRequestParam) GetValueLimit() int32 {\n\tif m != nil && m.ValueLimit != nil {\n\t\treturn *m.ValueLimit\n\t}\n\treturn 0\n}\n\nfunc (m *FacetRequestParam) GetRange() []*FacetRange {\n\tif m != nil {\n\t\treturn m.Range\n\t}\n\treturn nil\n}\n\nfunc (m *FacetRequestParam) GetValueConstraint() []string {\n\tif m != nil 
{\n\t\treturn m.ValueConstraint\n\t}\n\treturn nil\n}\n\ntype FacetAutoDetectParam struct {\n\tValueLimit       *int32 `protobuf:\"varint,1,opt,name=value_limit,def=10\" json:\"value_limit,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *FacetAutoDetectParam) Reset()         { *m = FacetAutoDetectParam{} }\nfunc (m *FacetAutoDetectParam) String() string { return proto.CompactTextString(m) }\nfunc (*FacetAutoDetectParam) ProtoMessage()    {}\n\nconst Default_FacetAutoDetectParam_ValueLimit int32 = 10\n\nfunc (m *FacetAutoDetectParam) GetValueLimit() int32 {\n\tif m != nil && m.ValueLimit != nil {\n\t\treturn *m.ValueLimit\n\t}\n\treturn Default_FacetAutoDetectParam_ValueLimit\n}\n\ntype FacetRequest struct {\n\tName             *string            `protobuf:\"bytes,1,req,name=name\" json:\"name,omitempty\"`\n\tParams           *FacetRequestParam `protobuf:\"bytes,2,opt,name=params\" json:\"params,omitempty\"`\n\tXXX_unrecognized []byte             `json:\"-\"`\n}\n\nfunc (m *FacetRequest) Reset()         { *m = FacetRequest{} }\nfunc (m *FacetRequest) String() string { return proto.CompactTextString(m) }\nfunc (*FacetRequest) ProtoMessage()    {}\n\nfunc (m *FacetRequest) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *FacetRequest) GetParams() *FacetRequestParam {\n\tif m != nil {\n\t\treturn m.Params\n\t}\n\treturn nil\n}\n\ntype FacetRefinement struct {\n\tName             *string                `protobuf:\"bytes,1,req,name=name\" json:\"name,omitempty\"`\n\tValue            *string                `protobuf:\"bytes,2,opt,name=value\" json:\"value,omitempty\"`\n\tRange            *FacetRefinement_Range `protobuf:\"bytes,3,opt,name=range\" json:\"range,omitempty\"`\n\tXXX_unrecognized []byte                 `json:\"-\"`\n}\n\nfunc (m *FacetRefinement) Reset()         { *m = FacetRefinement{} }\nfunc (m *FacetRefinement) String() string { return proto.CompactTextString(m) }\nfunc 
(*FacetRefinement) ProtoMessage()    {}\n\nfunc (m *FacetRefinement) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *FacetRefinement) GetValue() string {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn \"\"\n}\n\nfunc (m *FacetRefinement) GetRange() *FacetRefinement_Range {\n\tif m != nil {\n\t\treturn m.Range\n\t}\n\treturn nil\n}\n\ntype FacetRefinement_Range struct {\n\tStart            *string `protobuf:\"bytes,1,opt,name=start\" json:\"start,omitempty\"`\n\tEnd              *string `protobuf:\"bytes,2,opt,name=end\" json:\"end,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *FacetRefinement_Range) Reset()         { *m = FacetRefinement_Range{} }\nfunc (m *FacetRefinement_Range) String() string { return proto.CompactTextString(m) }\nfunc (*FacetRefinement_Range) ProtoMessage()    {}\n\nfunc (m *FacetRefinement_Range) GetStart() string {\n\tif m != nil && m.Start != nil {\n\t\treturn *m.Start\n\t}\n\treturn \"\"\n}\n\nfunc (m *FacetRefinement_Range) GetEnd() string {\n\tif m != nil && m.End != nil {\n\t\treturn *m.End\n\t}\n\treturn \"\"\n}\n\ntype SearchParams struct {\n\tIndexSpec              *IndexSpec                `protobuf:\"bytes,1,req,name=index_spec\" json:\"index_spec,omitempty\"`\n\tQuery                  *string                   `protobuf:\"bytes,2,req,name=query\" json:\"query,omitempty\"`\n\tCursor                 *string                   `protobuf:\"bytes,4,opt,name=cursor\" json:\"cursor,omitempty\"`\n\tOffset                 *int32                    `protobuf:\"varint,11,opt,name=offset\" json:\"offset,omitempty\"`\n\tCursorType             *SearchParams_CursorType  `protobuf:\"varint,5,opt,name=cursor_type,enum=search.SearchParams_CursorType,def=0\" json:\"cursor_type,omitempty\"`\n\tLimit                  *int32                    `protobuf:\"varint,6,opt,name=limit,def=20\" json:\"limit,omitempty\"`\n\tMatchedCountAccuracy   *int32      
              `protobuf:\"varint,7,opt,name=matched_count_accuracy\" json:\"matched_count_accuracy,omitempty\"`\n\tSortSpec               []*SortSpec               `protobuf:\"bytes,8,rep,name=sort_spec\" json:\"sort_spec,omitempty\"`\n\tScorerSpec             *ScorerSpec               `protobuf:\"bytes,9,opt,name=scorer_spec\" json:\"scorer_spec,omitempty\"`\n\tFieldSpec              *FieldSpec                `protobuf:\"bytes,10,opt,name=field_spec\" json:\"field_spec,omitempty\"`\n\tKeysOnly               *bool                     `protobuf:\"varint,12,opt,name=keys_only\" json:\"keys_only,omitempty\"`\n\tParsingMode            *SearchParams_ParsingMode `protobuf:\"varint,13,opt,name=parsing_mode,enum=search.SearchParams_ParsingMode,def=0\" json:\"parsing_mode,omitempty\"`\n\tAutoDiscoverFacetCount *int32                    `protobuf:\"varint,15,opt,name=auto_discover_facet_count,def=0\" json:\"auto_discover_facet_count,omitempty\"`\n\tIncludeFacet           []*FacetRequest           `protobuf:\"bytes,16,rep,name=include_facet\" json:\"include_facet,omitempty\"`\n\tFacetRefinement        []*FacetRefinement        `protobuf:\"bytes,17,rep,name=facet_refinement\" json:\"facet_refinement,omitempty\"`\n\tFacetAutoDetectParam   *FacetAutoDetectParam     `protobuf:\"bytes,18,opt,name=facet_auto_detect_param\" json:\"facet_auto_detect_param,omitempty\"`\n\tFacetDepth             *int32                    `protobuf:\"varint,19,opt,name=facet_depth,def=1000\" json:\"facet_depth,omitempty\"`\n\tXXX_unrecognized       []byte                    `json:\"-\"`\n}\n\nfunc (m *SearchParams) Reset()         { *m = SearchParams{} }\nfunc (m *SearchParams) String() string { return proto.CompactTextString(m) }\nfunc (*SearchParams) ProtoMessage()    {}\n\nconst Default_SearchParams_CursorType SearchParams_CursorType = SearchParams_NONE\nconst Default_SearchParams_Limit int32 = 20\nconst Default_SearchParams_ParsingMode SearchParams_ParsingMode = SearchParams_STRICT\nconst 
Default_SearchParams_AutoDiscoverFacetCount int32 = 0\nconst Default_SearchParams_FacetDepth int32 = 1000\n\nfunc (m *SearchParams) GetIndexSpec() *IndexSpec {\n\tif m != nil {\n\t\treturn m.IndexSpec\n\t}\n\treturn nil\n}\n\nfunc (m *SearchParams) GetQuery() string {\n\tif m != nil && m.Query != nil {\n\t\treturn *m.Query\n\t}\n\treturn \"\"\n}\n\nfunc (m *SearchParams) GetCursor() string {\n\tif m != nil && m.Cursor != nil {\n\t\treturn *m.Cursor\n\t}\n\treturn \"\"\n}\n\nfunc (m *SearchParams) GetOffset() int32 {\n\tif m != nil && m.Offset != nil {\n\t\treturn *m.Offset\n\t}\n\treturn 0\n}\n\nfunc (m *SearchParams) GetCursorType() SearchParams_CursorType {\n\tif m != nil && m.CursorType != nil {\n\t\treturn *m.CursorType\n\t}\n\treturn Default_SearchParams_CursorType\n}\n\nfunc (m *SearchParams) GetLimit() int32 {\n\tif m != nil && m.Limit != nil {\n\t\treturn *m.Limit\n\t}\n\treturn Default_SearchParams_Limit\n}\n\nfunc (m *SearchParams) GetMatchedCountAccuracy() int32 {\n\tif m != nil && m.MatchedCountAccuracy != nil {\n\t\treturn *m.MatchedCountAccuracy\n\t}\n\treturn 0\n}\n\nfunc (m *SearchParams) GetSortSpec() []*SortSpec {\n\tif m != nil {\n\t\treturn m.SortSpec\n\t}\n\treturn nil\n}\n\nfunc (m *SearchParams) GetScorerSpec() *ScorerSpec {\n\tif m != nil {\n\t\treturn m.ScorerSpec\n\t}\n\treturn nil\n}\n\nfunc (m *SearchParams) GetFieldSpec() *FieldSpec {\n\tif m != nil {\n\t\treturn m.FieldSpec\n\t}\n\treturn nil\n}\n\nfunc (m *SearchParams) GetKeysOnly() bool {\n\tif m != nil && m.KeysOnly != nil {\n\t\treturn *m.KeysOnly\n\t}\n\treturn false\n}\n\nfunc (m *SearchParams) GetParsingMode() SearchParams_ParsingMode {\n\tif m != nil && m.ParsingMode != nil {\n\t\treturn *m.ParsingMode\n\t}\n\treturn Default_SearchParams_ParsingMode\n}\n\nfunc (m *SearchParams) GetAutoDiscoverFacetCount() int32 {\n\tif m != nil && m.AutoDiscoverFacetCount != nil {\n\t\treturn *m.AutoDiscoverFacetCount\n\t}\n\treturn Default_SearchParams_AutoDiscoverFacetCount\n}\n\nfunc (m 
*SearchParams) GetIncludeFacet() []*FacetRequest {\n\tif m != nil {\n\t\treturn m.IncludeFacet\n\t}\n\treturn nil\n}\n\nfunc (m *SearchParams) GetFacetRefinement() []*FacetRefinement {\n\tif m != nil {\n\t\treturn m.FacetRefinement\n\t}\n\treturn nil\n}\n\nfunc (m *SearchParams) GetFacetAutoDetectParam() *FacetAutoDetectParam {\n\tif m != nil {\n\t\treturn m.FacetAutoDetectParam\n\t}\n\treturn nil\n}\n\nfunc (m *SearchParams) GetFacetDepth() int32 {\n\tif m != nil && m.FacetDepth != nil {\n\t\treturn *m.FacetDepth\n\t}\n\treturn Default_SearchParams_FacetDepth\n}\n\ntype SearchRequest struct {\n\tParams           *SearchParams `protobuf:\"bytes,1,req,name=params\" json:\"params,omitempty\"`\n\tAppId            []byte        `protobuf:\"bytes,3,opt,name=app_id\" json:\"app_id,omitempty\"`\n\tXXX_unrecognized []byte        `json:\"-\"`\n}\n\nfunc (m *SearchRequest) Reset()         { *m = SearchRequest{} }\nfunc (m *SearchRequest) String() string { return proto.CompactTextString(m) }\nfunc (*SearchRequest) ProtoMessage()    {}\n\nfunc (m *SearchRequest) GetParams() *SearchParams {\n\tif m != nil {\n\t\treturn m.Params\n\t}\n\treturn nil\n}\n\nfunc (m *SearchRequest) GetAppId() []byte {\n\tif m != nil {\n\t\treturn m.AppId\n\t}\n\treturn nil\n}\n\ntype FacetResultValue struct {\n\tName             *string          `protobuf:\"bytes,1,req,name=name\" json:\"name,omitempty\"`\n\tCount            *int32           `protobuf:\"varint,2,req,name=count\" json:\"count,omitempty\"`\n\tRefinement       *FacetRefinement `protobuf:\"bytes,3,req,name=refinement\" json:\"refinement,omitempty\"`\n\tXXX_unrecognized []byte           `json:\"-\"`\n}\n\nfunc (m *FacetResultValue) Reset()         { *m = FacetResultValue{} }\nfunc (m *FacetResultValue) String() string { return proto.CompactTextString(m) }\nfunc (*FacetResultValue) ProtoMessage()    {}\n\nfunc (m *FacetResultValue) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m 
*FacetResultValue) GetCount() int32 {\n\tif m != nil && m.Count != nil {\n\t\treturn *m.Count\n\t}\n\treturn 0\n}\n\nfunc (m *FacetResultValue) GetRefinement() *FacetRefinement {\n\tif m != nil {\n\t\treturn m.Refinement\n\t}\n\treturn nil\n}\n\ntype FacetResult struct {\n\tName             *string             `protobuf:\"bytes,1,req,name=name\" json:\"name,omitempty\"`\n\tValue            []*FacetResultValue `protobuf:\"bytes,2,rep,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte              `json:\"-\"`\n}\n\nfunc (m *FacetResult) Reset()         { *m = FacetResult{} }\nfunc (m *FacetResult) String() string { return proto.CompactTextString(m) }\nfunc (*FacetResult) ProtoMessage()    {}\n\nfunc (m *FacetResult) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *FacetResult) GetValue() []*FacetResultValue {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\ntype SearchResult struct {\n\tDocument         *Document `protobuf:\"bytes,1,req,name=document\" json:\"document,omitempty\"`\n\tExpression       []*Field  `protobuf:\"bytes,4,rep,name=expression\" json:\"expression,omitempty\"`\n\tScore            []float64 `protobuf:\"fixed64,2,rep,name=score\" json:\"score,omitempty\"`\n\tCursor           *string   `protobuf:\"bytes,3,opt,name=cursor\" json:\"cursor,omitempty\"`\n\tXXX_unrecognized []byte    `json:\"-\"`\n}\n\nfunc (m *SearchResult) Reset()         { *m = SearchResult{} }\nfunc (m *SearchResult) String() string { return proto.CompactTextString(m) }\nfunc (*SearchResult) ProtoMessage()    {}\n\nfunc (m *SearchResult) GetDocument() *Document {\n\tif m != nil {\n\t\treturn m.Document\n\t}\n\treturn nil\n}\n\nfunc (m *SearchResult) GetExpression() []*Field {\n\tif m != nil {\n\t\treturn m.Expression\n\t}\n\treturn nil\n}\n\nfunc (m *SearchResult) GetScore() []float64 {\n\tif m != nil {\n\t\treturn m.Score\n\t}\n\treturn nil\n}\n\nfunc (m *SearchResult) GetCursor() string {\n\tif 
m != nil && m.Cursor != nil {\n\t\treturn *m.Cursor\n\t}\n\treturn \"\"\n}\n\ntype SearchResponse struct {\n\tResult           []*SearchResult           `protobuf:\"bytes,1,rep,name=result\" json:\"result,omitempty\"`\n\tMatchedCount     *int64                    `protobuf:\"varint,2,req,name=matched_count\" json:\"matched_count,omitempty\"`\n\tStatus           *RequestStatus            `protobuf:\"bytes,3,req,name=status\" json:\"status,omitempty\"`\n\tCursor           *string                   `protobuf:\"bytes,4,opt,name=cursor\" json:\"cursor,omitempty\"`\n\tFacetResult      []*FacetResult            `protobuf:\"bytes,5,rep,name=facet_result\" json:\"facet_result,omitempty\"`\n\tXXX_extensions   map[int32]proto.Extension `json:\"-\"`\n\tXXX_unrecognized []byte                    `json:\"-\"`\n}\n\nfunc (m *SearchResponse) Reset()         { *m = SearchResponse{} }\nfunc (m *SearchResponse) String() string { return proto.CompactTextString(m) }\nfunc (*SearchResponse) ProtoMessage()    {}\n\nvar extRange_SearchResponse = []proto.ExtensionRange{\n\t{1000, 9999},\n}\n\nfunc (*SearchResponse) ExtensionRangeArray() []proto.ExtensionRange {\n\treturn extRange_SearchResponse\n}\nfunc (m *SearchResponse) ExtensionMap() map[int32]proto.Extension {\n\tif m.XXX_extensions == nil {\n\t\tm.XXX_extensions = make(map[int32]proto.Extension)\n\t}\n\treturn m.XXX_extensions\n}\n\nfunc (m *SearchResponse) GetResult() []*SearchResult {\n\tif m != nil {\n\t\treturn m.Result\n\t}\n\treturn nil\n}\n\nfunc (m *SearchResponse) GetMatchedCount() int64 {\n\tif m != nil && m.MatchedCount != nil {\n\t\treturn *m.MatchedCount\n\t}\n\treturn 0\n}\n\nfunc (m *SearchResponse) GetStatus() *RequestStatus {\n\tif m != nil {\n\t\treturn m.Status\n\t}\n\treturn nil\n}\n\nfunc (m *SearchResponse) GetCursor() string {\n\tif m != nil && m.Cursor != nil {\n\t\treturn *m.Cursor\n\t}\n\treturn \"\"\n}\n\nfunc (m *SearchResponse) GetFacetResult() []*FacetResult {\n\tif m != nil {\n\t\treturn 
m.FacetResult\n\t}\n\treturn nil\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/search/search.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"search\";\n\npackage search;\n\nmessage Scope {\n  enum Type {\n    USER_BY_CANONICAL_ID = 1;\n    USER_BY_EMAIL = 2;\n    GROUP_BY_CANONICAL_ID = 3;\n    GROUP_BY_EMAIL = 4;\n    GROUP_BY_DOMAIN = 5;\n    ALL_USERS = 6;\n    ALL_AUTHENTICATED_USERS = 7;\n  }\n\n  optional Type type = 1;\n  optional string value = 2;\n}\n\nmessage Entry {\n  enum Permission {\n    READ = 1;\n    WRITE = 2;\n    FULL_CONTROL = 3;\n  }\n\n  optional Scope scope = 1;\n  optional Permission permission = 2;\n  optional string display_name = 3;\n}\n\nmessage AccessControlList {\n  optional string owner = 1;\n  repeated Entry entries = 2;\n}\n\nmessage FieldValue {\n  enum ContentType {\n    TEXT = 0;\n    HTML = 1;\n    ATOM = 2;\n    DATE = 3;\n    NUMBER = 4;\n    GEO = 5;\n  }\n\n  optional ContentType type = 1 [default = TEXT];\n\n  optional string language = 2 [default = \"en\"];\n\n  optional string string_value = 3;\n\n  optional group Geo = 4 {\n    required double lat = 5;\n    required double lng = 6;\n  }\n}\n\nmessage Field {\n  required string name = 1;\n  required FieldValue value = 2;\n}\n\nmessage FieldTypes {\n  required string name = 1;\n  repeated FieldValue.ContentType type = 2;\n}\n\nmessage IndexShardSettings {\n  repeated int32 prev_num_shards = 1;\n  required int32 num_shards = 2 [default=1];\n  repeated int32 prev_num_shards_search_false = 3;\n  optional string local_replica = 4 [default = \"\"];\n}\n\nmessage FacetValue {\n  enum ContentType {\n    ATOM = 2;\n    NUMBER = 4;\n  }\n\n  optional ContentType type = 1 [default = ATOM];\n  optional string string_value = 3;\n}\n\nmessage Facet {\n  required string name = 1;\n  required FacetValue value = 2;\n}\n\nmessage DocumentMetadata  {\n  optional int64 version = 1;\n  optional int64 committed_st_version = 2;\n}\n\nmessage Document {\n  optional string id = 1;\n  optional string language = 2 [default = \"en\"];\n  repeated Field field = 3;\n  optional 
int32 order_id = 4;\n\n  enum Storage {\n    DISK = 0;\n  }\n\n  optional Storage storage = 5 [default = DISK];\n  repeated Facet facet = 8;\n}\n\nmessage SearchServiceError {\n  enum ErrorCode {\n    OK = 0;\n    INVALID_REQUEST = 1;\n    TRANSIENT_ERROR = 2;\n    INTERNAL_ERROR = 3;\n    PERMISSION_DENIED = 4;\n    TIMEOUT = 5;\n    CONCURRENT_TRANSACTION = 6;\n  }\n}\n\nmessage RequestStatus {\n  required SearchServiceError.ErrorCode code = 1;\n  optional string error_detail = 2;\n  optional int32 canonical_code = 3;\n}\n\nmessage IndexSpec {\n  required string name = 1;\n\n  enum Consistency {\n    GLOBAL = 0;\n    PER_DOCUMENT = 1;\n  }\n  optional Consistency consistency = 2 [default = PER_DOCUMENT];\n\n  optional string namespace = 3;\n  optional int32 version = 4;\n\n  enum Source {\n    SEARCH = 0;\n    DATASTORE = 1;\n    CLOUD_STORAGE = 2;\n  }\n  optional Source source = 5 [default = SEARCH];\n\n  enum Mode {\n    PRIORITY = 0;\n    BACKGROUND = 1;\n  }\n  optional Mode mode = 6 [default = PRIORITY];\n}\n\nmessage IndexMetadata {\n  required IndexSpec index_spec = 1;\n\n  repeated FieldTypes field = 2;\n\n  message Storage {\n    optional int64 amount_used = 1;\n    optional int64 limit = 2;\n  }\n  optional Storage storage = 3;\n}\n\nmessage IndexDocumentParams {\n  repeated Document document = 1;\n\n  enum Freshness {\n    SYNCHRONOUSLY = 0;\n    WHEN_CONVENIENT = 1;\n  }\n  optional Freshness freshness = 2 [default = SYNCHRONOUSLY, deprecated=true];\n\n  required IndexSpec index_spec = 3;\n}\n\nmessage IndexDocumentRequest {\n  required IndexDocumentParams params = 1;\n\n  optional bytes app_id = 3;\n}\n\nmessage IndexDocumentResponse {\n  repeated RequestStatus status = 1;\n\n  repeated string doc_id = 2;\n}\n\nmessage DeleteDocumentParams {\n  repeated string doc_id = 1;\n\n  required IndexSpec index_spec = 2;\n}\n\nmessage DeleteDocumentRequest {\n  required DeleteDocumentParams params = 1;\n\n  optional bytes app_id = 3;\n}\n\nmessage 
DeleteDocumentResponse {\n  repeated RequestStatus status = 1;\n}\n\nmessage ListDocumentsParams {\n  required IndexSpec index_spec = 1;\n  optional string start_doc_id = 2;\n  optional bool include_start_doc = 3 [default = true];\n  optional int32 limit = 4 [default = 100];\n  optional bool keys_only = 5;\n}\n\nmessage ListDocumentsRequest {\n  required ListDocumentsParams params = 1;\n\n  optional bytes app_id = 2;\n}\n\nmessage ListDocumentsResponse {\n  required RequestStatus status = 1;\n\n  repeated Document document = 2;\n}\n\nmessage ListIndexesParams {\n  optional bool fetch_schema = 1;\n  optional int32 limit = 2 [default = 20];\n  optional string namespace = 3;\n  optional string start_index_name = 4;\n  optional bool include_start_index = 5 [default = true];\n  optional string index_name_prefix = 6;\n  optional int32 offset = 7;\n  optional IndexSpec.Source source = 8 [default = SEARCH];\n}\n\nmessage ListIndexesRequest {\n  required ListIndexesParams params = 1;\n\n  optional bytes app_id = 3;\n}\n\nmessage ListIndexesResponse {\n  required RequestStatus status = 1;\n  repeated IndexMetadata index_metadata = 2;\n}\n\nmessage DeleteSchemaParams {\n  optional IndexSpec.Source source = 1 [default = SEARCH];\n  repeated IndexSpec index_spec = 2;\n}\n\nmessage DeleteSchemaRequest {\n  required DeleteSchemaParams params = 1;\n\n  optional bytes app_id = 3;\n}\n\nmessage DeleteSchemaResponse {\n  repeated RequestStatus status = 1;\n}\n\nmessage SortSpec {\n  required string sort_expression = 1;\n  optional bool sort_descending = 2 [default = true];\n  optional string default_value_text = 4;\n  optional double default_value_numeric = 5;\n}\n\nmessage ScorerSpec {\n  enum Scorer {\n    RESCORING_MATCH_SCORER = 0;\n    MATCH_SCORER = 2;\n  }\n  optional Scorer scorer = 1 [default = MATCH_SCORER];\n\n  optional int32 limit = 2 [default = 1000];\n  optional string match_scorer_parameters = 9;\n}\n\nmessage FieldSpec {\n  repeated string name = 1;\n\n  repeated 
group Expression = 2 {\n    required string name = 3;\n    required string expression = 4;\n  }\n}\n\nmessage FacetRange {\n  optional string name = 1;\n  optional string start = 2;\n  optional string end = 3;\n}\n\nmessage FacetRequestParam {\n  optional int32 value_limit = 1;\n  repeated FacetRange range = 2;\n  repeated string value_constraint = 3;\n}\n\nmessage FacetAutoDetectParam {\n  optional int32 value_limit = 1 [default = 10];\n}\n\nmessage FacetRequest {\n  required string name = 1;\n  optional FacetRequestParam params = 2;\n}\n\nmessage FacetRefinement {\n  required string name = 1;\n  optional string value = 2;\n\n  message Range {\n    optional string start = 1;\n    optional string end = 2;\n  }\n  optional Range range = 3;\n}\n\nmessage SearchParams {\n  required IndexSpec index_spec = 1;\n  required string query = 2;\n  optional string cursor = 4;\n  optional int32 offset = 11;\n\n  enum CursorType {\n    NONE = 0;\n    SINGLE = 1;\n    PER_RESULT = 2;\n  }\n  optional CursorType cursor_type = 5 [default = NONE];\n\n  optional int32 limit = 6 [default = 20];\n  optional int32 matched_count_accuracy = 7;\n  repeated SortSpec sort_spec = 8;\n  optional ScorerSpec scorer_spec = 9;\n  optional FieldSpec field_spec = 10;\n  optional bool keys_only = 12;\n\n  enum ParsingMode {\n    STRICT = 0;\n    RELAXED = 1;\n  }\n  optional ParsingMode parsing_mode = 13 [default = STRICT];\n\n  optional int32 auto_discover_facet_count = 15 [default = 0];\n  repeated FacetRequest include_facet = 16;\n  repeated FacetRefinement facet_refinement = 17;\n  optional FacetAutoDetectParam facet_auto_detect_param = 18;\n  optional int32 facet_depth = 19 [default=1000];\n}\n\nmessage SearchRequest {\n  required SearchParams params = 1;\n\n  optional bytes app_id = 3;\n}\n\nmessage FacetResultValue {\n  required string name = 1;\n  required int32 count = 2;\n  required FacetRefinement refinement = 3;\n}\n\nmessage FacetResult {\n  required string name = 1;\n  repeated 
FacetResultValue value = 2;\n}\n\nmessage SearchResult {\n  required Document document = 1;\n  repeated Field expression = 4;\n  repeated double score = 2;\n  optional string cursor = 3;\n}\n\nmessage SearchResponse {\n  repeated SearchResult result = 1;\n  required int64 matched_count = 2;\n  required RequestStatus status = 3;\n  optional string cursor = 4;\n  repeated FacetResult facet_result = 5;\n\n  extensions 1000 to 9999;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/socket/socket_service.proto\n// DO NOT EDIT!\n\n/*\nPackage socket is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/socket/socket_service.proto\n\nIt has these top-level messages:\n\tRemoteSocketServiceError\n\tAddressPort\n\tCreateSocketRequest\n\tCreateSocketReply\n\tBindRequest\n\tBindReply\n\tGetSocketNameRequest\n\tGetSocketNameReply\n\tGetPeerNameRequest\n\tGetPeerNameReply\n\tSocketOption\n\tSetSocketOptionsRequest\n\tSetSocketOptionsReply\n\tGetSocketOptionsRequest\n\tGetSocketOptionsReply\n\tConnectRequest\n\tConnectReply\n\tListenRequest\n\tListenReply\n\tAcceptRequest\n\tAcceptReply\n\tShutDownRequest\n\tShutDownReply\n\tCloseRequest\n\tCloseReply\n\tSendRequest\n\tSendReply\n\tReceiveRequest\n\tReceiveReply\n\tPollEvent\n\tPollRequest\n\tPollReply\n\tResolveRequest\n\tResolveReply\n*/\npackage socket\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype RemoteSocketServiceError_ErrorCode int32\n\nconst (\n\tRemoteSocketServiceError_SYSTEM_ERROR      RemoteSocketServiceError_ErrorCode = 1\n\tRemoteSocketServiceError_GAI_ERROR         RemoteSocketServiceError_ErrorCode = 2\n\tRemoteSocketServiceError_FAILURE           RemoteSocketServiceError_ErrorCode = 4\n\tRemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5\n\tRemoteSocketServiceError_INVALID_REQUEST   RemoteSocketServiceError_ErrorCode = 6\n\tRemoteSocketServiceError_SOCKET_CLOSED     RemoteSocketServiceError_ErrorCode = 7\n)\n\nvar RemoteSocketServiceError_ErrorCode_name = map[int32]string{\n\t1: \"SYSTEM_ERROR\",\n\t2: \"GAI_ERROR\",\n\t4: \"FAILURE\",\n\t5: \"PERMISSION_DENIED\",\n\t6: \"INVALID_REQUEST\",\n\t7: 
\"SOCKET_CLOSED\",\n}\nvar RemoteSocketServiceError_ErrorCode_value = map[string]int32{\n\t\"SYSTEM_ERROR\":      1,\n\t\"GAI_ERROR\":         2,\n\t\"FAILURE\":           4,\n\t\"PERMISSION_DENIED\": 5,\n\t\"INVALID_REQUEST\":   6,\n\t\"SOCKET_CLOSED\":     7,\n}\n\nfunc (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode {\n\tp := new(RemoteSocketServiceError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x RemoteSocketServiceError_ErrorCode) String() string {\n\treturn proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x))\n}\nfunc (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, \"RemoteSocketServiceError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = RemoteSocketServiceError_ErrorCode(value)\n\treturn nil\n}\n\ntype RemoteSocketServiceError_SystemError int32\n\nconst (\n\tRemoteSocketServiceError_SYS_SUCCESS         RemoteSocketServiceError_SystemError = 0\n\tRemoteSocketServiceError_SYS_EPERM           RemoteSocketServiceError_SystemError = 1\n\tRemoteSocketServiceError_SYS_ENOENT          RemoteSocketServiceError_SystemError = 2\n\tRemoteSocketServiceError_SYS_ESRCH           RemoteSocketServiceError_SystemError = 3\n\tRemoteSocketServiceError_SYS_EINTR           RemoteSocketServiceError_SystemError = 4\n\tRemoteSocketServiceError_SYS_EIO             RemoteSocketServiceError_SystemError = 5\n\tRemoteSocketServiceError_SYS_ENXIO           RemoteSocketServiceError_SystemError = 6\n\tRemoteSocketServiceError_SYS_E2BIG           RemoteSocketServiceError_SystemError = 7\n\tRemoteSocketServiceError_SYS_ENOEXEC         RemoteSocketServiceError_SystemError = 8\n\tRemoteSocketServiceError_SYS_EBADF           RemoteSocketServiceError_SystemError = 9\n\tRemoteSocketServiceError_SYS_ECHILD          RemoteSocketServiceError_SystemError = 10\n\tRemoteSocketServiceError_SYS_EAGAIN          
RemoteSocketServiceError_SystemError = 11\n\tRemoteSocketServiceError_SYS_EWOULDBLOCK     RemoteSocketServiceError_SystemError = 11\n\tRemoteSocketServiceError_SYS_ENOMEM          RemoteSocketServiceError_SystemError = 12\n\tRemoteSocketServiceError_SYS_EACCES          RemoteSocketServiceError_SystemError = 13\n\tRemoteSocketServiceError_SYS_EFAULT          RemoteSocketServiceError_SystemError = 14\n\tRemoteSocketServiceError_SYS_ENOTBLK         RemoteSocketServiceError_SystemError = 15\n\tRemoteSocketServiceError_SYS_EBUSY           RemoteSocketServiceError_SystemError = 16\n\tRemoteSocketServiceError_SYS_EEXIST          RemoteSocketServiceError_SystemError = 17\n\tRemoteSocketServiceError_SYS_EXDEV           RemoteSocketServiceError_SystemError = 18\n\tRemoteSocketServiceError_SYS_ENODEV          RemoteSocketServiceError_SystemError = 19\n\tRemoteSocketServiceError_SYS_ENOTDIR         RemoteSocketServiceError_SystemError = 20\n\tRemoteSocketServiceError_SYS_EISDIR          RemoteSocketServiceError_SystemError = 21\n\tRemoteSocketServiceError_SYS_EINVAL          RemoteSocketServiceError_SystemError = 22\n\tRemoteSocketServiceError_SYS_ENFILE          RemoteSocketServiceError_SystemError = 23\n\tRemoteSocketServiceError_SYS_EMFILE          RemoteSocketServiceError_SystemError = 24\n\tRemoteSocketServiceError_SYS_ENOTTY          RemoteSocketServiceError_SystemError = 25\n\tRemoteSocketServiceError_SYS_ETXTBSY         RemoteSocketServiceError_SystemError = 26\n\tRemoteSocketServiceError_SYS_EFBIG           RemoteSocketServiceError_SystemError = 27\n\tRemoteSocketServiceError_SYS_ENOSPC          RemoteSocketServiceError_SystemError = 28\n\tRemoteSocketServiceError_SYS_ESPIPE          RemoteSocketServiceError_SystemError = 29\n\tRemoteSocketServiceError_SYS_EROFS           RemoteSocketServiceError_SystemError = 30\n\tRemoteSocketServiceError_SYS_EMLINK          RemoteSocketServiceError_SystemError = 31\n\tRemoteSocketServiceError_SYS_EPIPE           
RemoteSocketServiceError_SystemError = 32\n\tRemoteSocketServiceError_SYS_EDOM            RemoteSocketServiceError_SystemError = 33\n\tRemoteSocketServiceError_SYS_ERANGE          RemoteSocketServiceError_SystemError = 34\n\tRemoteSocketServiceError_SYS_EDEADLK         RemoteSocketServiceError_SystemError = 35\n\tRemoteSocketServiceError_SYS_EDEADLOCK       RemoteSocketServiceError_SystemError = 35\n\tRemoteSocketServiceError_SYS_ENAMETOOLONG    RemoteSocketServiceError_SystemError = 36\n\tRemoteSocketServiceError_SYS_ENOLCK          RemoteSocketServiceError_SystemError = 37\n\tRemoteSocketServiceError_SYS_ENOSYS          RemoteSocketServiceError_SystemError = 38\n\tRemoteSocketServiceError_SYS_ENOTEMPTY       RemoteSocketServiceError_SystemError = 39\n\tRemoteSocketServiceError_SYS_ELOOP           RemoteSocketServiceError_SystemError = 40\n\tRemoteSocketServiceError_SYS_ENOMSG          RemoteSocketServiceError_SystemError = 42\n\tRemoteSocketServiceError_SYS_EIDRM           RemoteSocketServiceError_SystemError = 43\n\tRemoteSocketServiceError_SYS_ECHRNG          RemoteSocketServiceError_SystemError = 44\n\tRemoteSocketServiceError_SYS_EL2NSYNC        RemoteSocketServiceError_SystemError = 45\n\tRemoteSocketServiceError_SYS_EL3HLT          RemoteSocketServiceError_SystemError = 46\n\tRemoteSocketServiceError_SYS_EL3RST          RemoteSocketServiceError_SystemError = 47\n\tRemoteSocketServiceError_SYS_ELNRNG          RemoteSocketServiceError_SystemError = 48\n\tRemoteSocketServiceError_SYS_EUNATCH         RemoteSocketServiceError_SystemError = 49\n\tRemoteSocketServiceError_SYS_ENOCSI          RemoteSocketServiceError_SystemError = 50\n\tRemoteSocketServiceError_SYS_EL2HLT          RemoteSocketServiceError_SystemError = 51\n\tRemoteSocketServiceError_SYS_EBADE           RemoteSocketServiceError_SystemError = 52\n\tRemoteSocketServiceError_SYS_EBADR           RemoteSocketServiceError_SystemError = 53\n\tRemoteSocketServiceError_SYS_EXFULL          
RemoteSocketServiceError_SystemError = 54\n\tRemoteSocketServiceError_SYS_ENOANO          RemoteSocketServiceError_SystemError = 55\n\tRemoteSocketServiceError_SYS_EBADRQC         RemoteSocketServiceError_SystemError = 56\n\tRemoteSocketServiceError_SYS_EBADSLT         RemoteSocketServiceError_SystemError = 57\n\tRemoteSocketServiceError_SYS_EBFONT          RemoteSocketServiceError_SystemError = 59\n\tRemoteSocketServiceError_SYS_ENOSTR          RemoteSocketServiceError_SystemError = 60\n\tRemoteSocketServiceError_SYS_ENODATA         RemoteSocketServiceError_SystemError = 61\n\tRemoteSocketServiceError_SYS_ETIME           RemoteSocketServiceError_SystemError = 62\n\tRemoteSocketServiceError_SYS_ENOSR           RemoteSocketServiceError_SystemError = 63\n\tRemoteSocketServiceError_SYS_ENONET          RemoteSocketServiceError_SystemError = 64\n\tRemoteSocketServiceError_SYS_ENOPKG          RemoteSocketServiceError_SystemError = 65\n\tRemoteSocketServiceError_SYS_EREMOTE         RemoteSocketServiceError_SystemError = 66\n\tRemoteSocketServiceError_SYS_ENOLINK         RemoteSocketServiceError_SystemError = 67\n\tRemoteSocketServiceError_SYS_EADV            RemoteSocketServiceError_SystemError = 68\n\tRemoteSocketServiceError_SYS_ESRMNT          RemoteSocketServiceError_SystemError = 69\n\tRemoteSocketServiceError_SYS_ECOMM           RemoteSocketServiceError_SystemError = 70\n\tRemoteSocketServiceError_SYS_EPROTO          RemoteSocketServiceError_SystemError = 71\n\tRemoteSocketServiceError_SYS_EMULTIHOP       RemoteSocketServiceError_SystemError = 72\n\tRemoteSocketServiceError_SYS_EDOTDOT         RemoteSocketServiceError_SystemError = 73\n\tRemoteSocketServiceError_SYS_EBADMSG         RemoteSocketServiceError_SystemError = 74\n\tRemoteSocketServiceError_SYS_EOVERFLOW       RemoteSocketServiceError_SystemError = 75\n\tRemoteSocketServiceError_SYS_ENOTUNIQ        RemoteSocketServiceError_SystemError = 76\n\tRemoteSocketServiceError_SYS_EBADFD          
RemoteSocketServiceError_SystemError = 77\n\tRemoteSocketServiceError_SYS_EREMCHG         RemoteSocketServiceError_SystemError = 78\n\tRemoteSocketServiceError_SYS_ELIBACC         RemoteSocketServiceError_SystemError = 79\n\tRemoteSocketServiceError_SYS_ELIBBAD         RemoteSocketServiceError_SystemError = 80\n\tRemoteSocketServiceError_SYS_ELIBSCN         RemoteSocketServiceError_SystemError = 81\n\tRemoteSocketServiceError_SYS_ELIBMAX         RemoteSocketServiceError_SystemError = 82\n\tRemoteSocketServiceError_SYS_ELIBEXEC        RemoteSocketServiceError_SystemError = 83\n\tRemoteSocketServiceError_SYS_EILSEQ          RemoteSocketServiceError_SystemError = 84\n\tRemoteSocketServiceError_SYS_ERESTART        RemoteSocketServiceError_SystemError = 85\n\tRemoteSocketServiceError_SYS_ESTRPIPE        RemoteSocketServiceError_SystemError = 86\n\tRemoteSocketServiceError_SYS_EUSERS          RemoteSocketServiceError_SystemError = 87\n\tRemoteSocketServiceError_SYS_ENOTSOCK        RemoteSocketServiceError_SystemError = 88\n\tRemoteSocketServiceError_SYS_EDESTADDRREQ    RemoteSocketServiceError_SystemError = 89\n\tRemoteSocketServiceError_SYS_EMSGSIZE        RemoteSocketServiceError_SystemError = 90\n\tRemoteSocketServiceError_SYS_EPROTOTYPE      RemoteSocketServiceError_SystemError = 91\n\tRemoteSocketServiceError_SYS_ENOPROTOOPT     RemoteSocketServiceError_SystemError = 92\n\tRemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93\n\tRemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94\n\tRemoteSocketServiceError_SYS_EOPNOTSUPP      RemoteSocketServiceError_SystemError = 95\n\tRemoteSocketServiceError_SYS_ENOTSUP         RemoteSocketServiceError_SystemError = 95\n\tRemoteSocketServiceError_SYS_EPFNOSUPPORT    RemoteSocketServiceError_SystemError = 96\n\tRemoteSocketServiceError_SYS_EAFNOSUPPORT    RemoteSocketServiceError_SystemError = 97\n\tRemoteSocketServiceError_SYS_EADDRINUSE      
RemoteSocketServiceError_SystemError = 98\n\tRemoteSocketServiceError_SYS_EADDRNOTAVAIL   RemoteSocketServiceError_SystemError = 99\n\tRemoteSocketServiceError_SYS_ENETDOWN        RemoteSocketServiceError_SystemError = 100\n\tRemoteSocketServiceError_SYS_ENETUNREACH     RemoteSocketServiceError_SystemError = 101\n\tRemoteSocketServiceError_SYS_ENETRESET       RemoteSocketServiceError_SystemError = 102\n\tRemoteSocketServiceError_SYS_ECONNABORTED    RemoteSocketServiceError_SystemError = 103\n\tRemoteSocketServiceError_SYS_ECONNRESET      RemoteSocketServiceError_SystemError = 104\n\tRemoteSocketServiceError_SYS_ENOBUFS         RemoteSocketServiceError_SystemError = 105\n\tRemoteSocketServiceError_SYS_EISCONN         RemoteSocketServiceError_SystemError = 106\n\tRemoteSocketServiceError_SYS_ENOTCONN        RemoteSocketServiceError_SystemError = 107\n\tRemoteSocketServiceError_SYS_ESHUTDOWN       RemoteSocketServiceError_SystemError = 108\n\tRemoteSocketServiceError_SYS_ETOOMANYREFS    RemoteSocketServiceError_SystemError = 109\n\tRemoteSocketServiceError_SYS_ETIMEDOUT       RemoteSocketServiceError_SystemError = 110\n\tRemoteSocketServiceError_SYS_ECONNREFUSED    RemoteSocketServiceError_SystemError = 111\n\tRemoteSocketServiceError_SYS_EHOSTDOWN       RemoteSocketServiceError_SystemError = 112\n\tRemoteSocketServiceError_SYS_EHOSTUNREACH    RemoteSocketServiceError_SystemError = 113\n\tRemoteSocketServiceError_SYS_EALREADY        RemoteSocketServiceError_SystemError = 114\n\tRemoteSocketServiceError_SYS_EINPROGRESS     RemoteSocketServiceError_SystemError = 115\n\tRemoteSocketServiceError_SYS_ESTALE          RemoteSocketServiceError_SystemError = 116\n\tRemoteSocketServiceError_SYS_EUCLEAN         RemoteSocketServiceError_SystemError = 117\n\tRemoteSocketServiceError_SYS_ENOTNAM         RemoteSocketServiceError_SystemError = 118\n\tRemoteSocketServiceError_SYS_ENAVAIL         RemoteSocketServiceError_SystemError = 119\n\tRemoteSocketServiceError_SYS_EISNAM          
RemoteSocketServiceError_SystemError = 120\n\tRemoteSocketServiceError_SYS_EREMOTEIO       RemoteSocketServiceError_SystemError = 121\n\tRemoteSocketServiceError_SYS_EDQUOT          RemoteSocketServiceError_SystemError = 122\n\tRemoteSocketServiceError_SYS_ENOMEDIUM       RemoteSocketServiceError_SystemError = 123\n\tRemoteSocketServiceError_SYS_EMEDIUMTYPE     RemoteSocketServiceError_SystemError = 124\n\tRemoteSocketServiceError_SYS_ECANCELED       RemoteSocketServiceError_SystemError = 125\n\tRemoteSocketServiceError_SYS_ENOKEY          RemoteSocketServiceError_SystemError = 126\n\tRemoteSocketServiceError_SYS_EKEYEXPIRED     RemoteSocketServiceError_SystemError = 127\n\tRemoteSocketServiceError_SYS_EKEYREVOKED     RemoteSocketServiceError_SystemError = 128\n\tRemoteSocketServiceError_SYS_EKEYREJECTED    RemoteSocketServiceError_SystemError = 129\n\tRemoteSocketServiceError_SYS_EOWNERDEAD      RemoteSocketServiceError_SystemError = 130\n\tRemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131\n\tRemoteSocketServiceError_SYS_ERFKILL         RemoteSocketServiceError_SystemError = 132\n)\n\nvar RemoteSocketServiceError_SystemError_name = map[int32]string{\n\t0:  \"SYS_SUCCESS\",\n\t1:  \"SYS_EPERM\",\n\t2:  \"SYS_ENOENT\",\n\t3:  \"SYS_ESRCH\",\n\t4:  \"SYS_EINTR\",\n\t5:  \"SYS_EIO\",\n\t6:  \"SYS_ENXIO\",\n\t7:  \"SYS_E2BIG\",\n\t8:  \"SYS_ENOEXEC\",\n\t9:  \"SYS_EBADF\",\n\t10: \"SYS_ECHILD\",\n\t11: \"SYS_EAGAIN\",\n\t// Duplicate value: 11: \"SYS_EWOULDBLOCK\",\n\t12: \"SYS_ENOMEM\",\n\t13: \"SYS_EACCES\",\n\t14: \"SYS_EFAULT\",\n\t15: \"SYS_ENOTBLK\",\n\t16: \"SYS_EBUSY\",\n\t17: \"SYS_EEXIST\",\n\t18: \"SYS_EXDEV\",\n\t19: \"SYS_ENODEV\",\n\t20: \"SYS_ENOTDIR\",\n\t21: \"SYS_EISDIR\",\n\t22: \"SYS_EINVAL\",\n\t23: \"SYS_ENFILE\",\n\t24: \"SYS_EMFILE\",\n\t25: \"SYS_ENOTTY\",\n\t26: \"SYS_ETXTBSY\",\n\t27: \"SYS_EFBIG\",\n\t28: \"SYS_ENOSPC\",\n\t29: \"SYS_ESPIPE\",\n\t30: \"SYS_EROFS\",\n\t31: \"SYS_EMLINK\",\n\t32: 
\"SYS_EPIPE\",\n\t33: \"SYS_EDOM\",\n\t34: \"SYS_ERANGE\",\n\t35: \"SYS_EDEADLK\",\n\t// Duplicate value: 35: \"SYS_EDEADLOCK\",\n\t36: \"SYS_ENAMETOOLONG\",\n\t37: \"SYS_ENOLCK\",\n\t38: \"SYS_ENOSYS\",\n\t39: \"SYS_ENOTEMPTY\",\n\t40: \"SYS_ELOOP\",\n\t42: \"SYS_ENOMSG\",\n\t43: \"SYS_EIDRM\",\n\t44: \"SYS_ECHRNG\",\n\t45: \"SYS_EL2NSYNC\",\n\t46: \"SYS_EL3HLT\",\n\t47: \"SYS_EL3RST\",\n\t48: \"SYS_ELNRNG\",\n\t49: \"SYS_EUNATCH\",\n\t50: \"SYS_ENOCSI\",\n\t51: \"SYS_EL2HLT\",\n\t52: \"SYS_EBADE\",\n\t53: \"SYS_EBADR\",\n\t54: \"SYS_EXFULL\",\n\t55: \"SYS_ENOANO\",\n\t56: \"SYS_EBADRQC\",\n\t57: \"SYS_EBADSLT\",\n\t59: \"SYS_EBFONT\",\n\t60: \"SYS_ENOSTR\",\n\t61: \"SYS_ENODATA\",\n\t62: \"SYS_ETIME\",\n\t63: \"SYS_ENOSR\",\n\t64: \"SYS_ENONET\",\n\t65: \"SYS_ENOPKG\",\n\t66: \"SYS_EREMOTE\",\n\t67: \"SYS_ENOLINK\",\n\t68: \"SYS_EADV\",\n\t69: \"SYS_ESRMNT\",\n\t70: \"SYS_ECOMM\",\n\t71: \"SYS_EPROTO\",\n\t72: \"SYS_EMULTIHOP\",\n\t73: \"SYS_EDOTDOT\",\n\t74: \"SYS_EBADMSG\",\n\t75: \"SYS_EOVERFLOW\",\n\t76: \"SYS_ENOTUNIQ\",\n\t77: \"SYS_EBADFD\",\n\t78: \"SYS_EREMCHG\",\n\t79: \"SYS_ELIBACC\",\n\t80: \"SYS_ELIBBAD\",\n\t81: \"SYS_ELIBSCN\",\n\t82: \"SYS_ELIBMAX\",\n\t83: \"SYS_ELIBEXEC\",\n\t84: \"SYS_EILSEQ\",\n\t85: \"SYS_ERESTART\",\n\t86: \"SYS_ESTRPIPE\",\n\t87: \"SYS_EUSERS\",\n\t88: \"SYS_ENOTSOCK\",\n\t89: \"SYS_EDESTADDRREQ\",\n\t90: \"SYS_EMSGSIZE\",\n\t91: \"SYS_EPROTOTYPE\",\n\t92: \"SYS_ENOPROTOOPT\",\n\t93: \"SYS_EPROTONOSUPPORT\",\n\t94: \"SYS_ESOCKTNOSUPPORT\",\n\t95: \"SYS_EOPNOTSUPP\",\n\t// Duplicate value: 95: \"SYS_ENOTSUP\",\n\t96:  \"SYS_EPFNOSUPPORT\",\n\t97:  \"SYS_EAFNOSUPPORT\",\n\t98:  \"SYS_EADDRINUSE\",\n\t99:  \"SYS_EADDRNOTAVAIL\",\n\t100: \"SYS_ENETDOWN\",\n\t101: \"SYS_ENETUNREACH\",\n\t102: \"SYS_ENETRESET\",\n\t103: \"SYS_ECONNABORTED\",\n\t104: \"SYS_ECONNRESET\",\n\t105: \"SYS_ENOBUFS\",\n\t106: \"SYS_EISCONN\",\n\t107: \"SYS_ENOTCONN\",\n\t108: \"SYS_ESHUTDOWN\",\n\t109: \"SYS_ETOOMANYREFS\",\n\t110: 
\"SYS_ETIMEDOUT\",\n\t111: \"SYS_ECONNREFUSED\",\n\t112: \"SYS_EHOSTDOWN\",\n\t113: \"SYS_EHOSTUNREACH\",\n\t114: \"SYS_EALREADY\",\n\t115: \"SYS_EINPROGRESS\",\n\t116: \"SYS_ESTALE\",\n\t117: \"SYS_EUCLEAN\",\n\t118: \"SYS_ENOTNAM\",\n\t119: \"SYS_ENAVAIL\",\n\t120: \"SYS_EISNAM\",\n\t121: \"SYS_EREMOTEIO\",\n\t122: \"SYS_EDQUOT\",\n\t123: \"SYS_ENOMEDIUM\",\n\t124: \"SYS_EMEDIUMTYPE\",\n\t125: \"SYS_ECANCELED\",\n\t126: \"SYS_ENOKEY\",\n\t127: \"SYS_EKEYEXPIRED\",\n\t128: \"SYS_EKEYREVOKED\",\n\t129: \"SYS_EKEYREJECTED\",\n\t130: \"SYS_EOWNERDEAD\",\n\t131: \"SYS_ENOTRECOVERABLE\",\n\t132: \"SYS_ERFKILL\",\n}\nvar RemoteSocketServiceError_SystemError_value = map[string]int32{\n\t\"SYS_SUCCESS\":         0,\n\t\"SYS_EPERM\":           1,\n\t\"SYS_ENOENT\":          2,\n\t\"SYS_ESRCH\":           3,\n\t\"SYS_EINTR\":           4,\n\t\"SYS_EIO\":             5,\n\t\"SYS_ENXIO\":           6,\n\t\"SYS_E2BIG\":           7,\n\t\"SYS_ENOEXEC\":         8,\n\t\"SYS_EBADF\":           9,\n\t\"SYS_ECHILD\":          10,\n\t\"SYS_EAGAIN\":          11,\n\t\"SYS_EWOULDBLOCK\":     11,\n\t\"SYS_ENOMEM\":          12,\n\t\"SYS_EACCES\":          13,\n\t\"SYS_EFAULT\":          14,\n\t\"SYS_ENOTBLK\":         15,\n\t\"SYS_EBUSY\":           16,\n\t\"SYS_EEXIST\":          17,\n\t\"SYS_EXDEV\":           18,\n\t\"SYS_ENODEV\":          19,\n\t\"SYS_ENOTDIR\":         20,\n\t\"SYS_EISDIR\":          21,\n\t\"SYS_EINVAL\":          22,\n\t\"SYS_ENFILE\":          23,\n\t\"SYS_EMFILE\":          24,\n\t\"SYS_ENOTTY\":          25,\n\t\"SYS_ETXTBSY\":         26,\n\t\"SYS_EFBIG\":           27,\n\t\"SYS_ENOSPC\":          28,\n\t\"SYS_ESPIPE\":          29,\n\t\"SYS_EROFS\":           30,\n\t\"SYS_EMLINK\":          31,\n\t\"SYS_EPIPE\":           32,\n\t\"SYS_EDOM\":            33,\n\t\"SYS_ERANGE\":          34,\n\t\"SYS_EDEADLK\":         35,\n\t\"SYS_EDEADLOCK\":       35,\n\t\"SYS_ENAMETOOLONG\":    36,\n\t\"SYS_ENOLCK\":          37,\n\t\"SYS_ENOSYS\":          
38,\n\t\"SYS_ENOTEMPTY\":       39,\n\t\"SYS_ELOOP\":           40,\n\t\"SYS_ENOMSG\":          42,\n\t\"SYS_EIDRM\":           43,\n\t\"SYS_ECHRNG\":          44,\n\t\"SYS_EL2NSYNC\":        45,\n\t\"SYS_EL3HLT\":          46,\n\t\"SYS_EL3RST\":          47,\n\t\"SYS_ELNRNG\":          48,\n\t\"SYS_EUNATCH\":         49,\n\t\"SYS_ENOCSI\":          50,\n\t\"SYS_EL2HLT\":          51,\n\t\"SYS_EBADE\":           52,\n\t\"SYS_EBADR\":           53,\n\t\"SYS_EXFULL\":          54,\n\t\"SYS_ENOANO\":          55,\n\t\"SYS_EBADRQC\":         56,\n\t\"SYS_EBADSLT\":         57,\n\t\"SYS_EBFONT\":          59,\n\t\"SYS_ENOSTR\":          60,\n\t\"SYS_ENODATA\":         61,\n\t\"SYS_ETIME\":           62,\n\t\"SYS_ENOSR\":           63,\n\t\"SYS_ENONET\":          64,\n\t\"SYS_ENOPKG\":          65,\n\t\"SYS_EREMOTE\":         66,\n\t\"SYS_ENOLINK\":         67,\n\t\"SYS_EADV\":            68,\n\t\"SYS_ESRMNT\":          69,\n\t\"SYS_ECOMM\":           70,\n\t\"SYS_EPROTO\":          71,\n\t\"SYS_EMULTIHOP\":       72,\n\t\"SYS_EDOTDOT\":         73,\n\t\"SYS_EBADMSG\":         74,\n\t\"SYS_EOVERFLOW\":       75,\n\t\"SYS_ENOTUNIQ\":        76,\n\t\"SYS_EBADFD\":          77,\n\t\"SYS_EREMCHG\":         78,\n\t\"SYS_ELIBACC\":         79,\n\t\"SYS_ELIBBAD\":         80,\n\t\"SYS_ELIBSCN\":         81,\n\t\"SYS_ELIBMAX\":         82,\n\t\"SYS_ELIBEXEC\":        83,\n\t\"SYS_EILSEQ\":          84,\n\t\"SYS_ERESTART\":        85,\n\t\"SYS_ESTRPIPE\":        86,\n\t\"SYS_EUSERS\":          87,\n\t\"SYS_ENOTSOCK\":        88,\n\t\"SYS_EDESTADDRREQ\":    89,\n\t\"SYS_EMSGSIZE\":        90,\n\t\"SYS_EPROTOTYPE\":      91,\n\t\"SYS_ENOPROTOOPT\":     92,\n\t\"SYS_EPROTONOSUPPORT\": 93,\n\t\"SYS_ESOCKTNOSUPPORT\": 94,\n\t\"SYS_EOPNOTSUPP\":      95,\n\t\"SYS_ENOTSUP\":         95,\n\t\"SYS_EPFNOSUPPORT\":    96,\n\t\"SYS_EAFNOSUPPORT\":    97,\n\t\"SYS_EADDRINUSE\":      98,\n\t\"SYS_EADDRNOTAVAIL\":   99,\n\t\"SYS_ENETDOWN\":        100,\n\t\"SYS_ENETUNREACH\":     
101,\n\t\"SYS_ENETRESET\":       102,\n\t\"SYS_ECONNABORTED\":    103,\n\t\"SYS_ECONNRESET\":      104,\n\t\"SYS_ENOBUFS\":         105,\n\t\"SYS_EISCONN\":         106,\n\t\"SYS_ENOTCONN\":        107,\n\t\"SYS_ESHUTDOWN\":       108,\n\t\"SYS_ETOOMANYREFS\":    109,\n\t\"SYS_ETIMEDOUT\":       110,\n\t\"SYS_ECONNREFUSED\":    111,\n\t\"SYS_EHOSTDOWN\":       112,\n\t\"SYS_EHOSTUNREACH\":    113,\n\t\"SYS_EALREADY\":        114,\n\t\"SYS_EINPROGRESS\":     115,\n\t\"SYS_ESTALE\":          116,\n\t\"SYS_EUCLEAN\":         117,\n\t\"SYS_ENOTNAM\":         118,\n\t\"SYS_ENAVAIL\":         119,\n\t\"SYS_EISNAM\":          120,\n\t\"SYS_EREMOTEIO\":       121,\n\t\"SYS_EDQUOT\":          122,\n\t\"SYS_ENOMEDIUM\":       123,\n\t\"SYS_EMEDIUMTYPE\":     124,\n\t\"SYS_ECANCELED\":       125,\n\t\"SYS_ENOKEY\":          126,\n\t\"SYS_EKEYEXPIRED\":     127,\n\t\"SYS_EKEYREVOKED\":     128,\n\t\"SYS_EKEYREJECTED\":    129,\n\t\"SYS_EOWNERDEAD\":      130,\n\t\"SYS_ENOTRECOVERABLE\": 131,\n\t\"SYS_ERFKILL\":         132,\n}\n\nfunc (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError {\n\tp := new(RemoteSocketServiceError_SystemError)\n\t*p = x\n\treturn p\n}\nfunc (x RemoteSocketServiceError_SystemError) String() string {\n\treturn proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x))\n}\nfunc (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, \"RemoteSocketServiceError_SystemError\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = RemoteSocketServiceError_SystemError(value)\n\treturn nil\n}\n\ntype CreateSocketRequest_SocketFamily int32\n\nconst (\n\tCreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1\n\tCreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2\n)\n\nvar CreateSocketRequest_SocketFamily_name = map[int32]string{\n\t1: \"IPv4\",\n\t2: \"IPv6\",\n}\nvar 
CreateSocketRequest_SocketFamily_value = map[string]int32{\n\t\"IPv4\": 1,\n\t\"IPv6\": 2,\n}\n\nfunc (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily {\n\tp := new(CreateSocketRequest_SocketFamily)\n\t*p = x\n\treturn p\n}\nfunc (x CreateSocketRequest_SocketFamily) String() string {\n\treturn proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x))\n}\nfunc (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, \"CreateSocketRequest_SocketFamily\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CreateSocketRequest_SocketFamily(value)\n\treturn nil\n}\n\ntype CreateSocketRequest_SocketProtocol int32\n\nconst (\n\tCreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1\n\tCreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2\n)\n\nvar CreateSocketRequest_SocketProtocol_name = map[int32]string{\n\t1: \"TCP\",\n\t2: \"UDP\",\n}\nvar CreateSocketRequest_SocketProtocol_value = map[string]int32{\n\t\"TCP\": 1,\n\t\"UDP\": 2,\n}\n\nfunc (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol {\n\tp := new(CreateSocketRequest_SocketProtocol)\n\t*p = x\n\treturn p\n}\nfunc (x CreateSocketRequest_SocketProtocol) String() string {\n\treturn proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x))\n}\nfunc (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, \"CreateSocketRequest_SocketProtocol\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CreateSocketRequest_SocketProtocol(value)\n\treturn nil\n}\n\ntype SocketOption_SocketOptionLevel int32\n\nconst (\n\tSocketOption_SOCKET_SOL_IP     SocketOption_SocketOptionLevel = 0\n\tSocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1\n\tSocketOption_SOCKET_SOL_TCP    SocketOption_SocketOptionLevel = 
6\n\tSocketOption_SOCKET_SOL_UDP    SocketOption_SocketOptionLevel = 17\n)\n\nvar SocketOption_SocketOptionLevel_name = map[int32]string{\n\t0:  \"SOCKET_SOL_IP\",\n\t1:  \"SOCKET_SOL_SOCKET\",\n\t6:  \"SOCKET_SOL_TCP\",\n\t17: \"SOCKET_SOL_UDP\",\n}\nvar SocketOption_SocketOptionLevel_value = map[string]int32{\n\t\"SOCKET_SOL_IP\":     0,\n\t\"SOCKET_SOL_SOCKET\": 1,\n\t\"SOCKET_SOL_TCP\":    6,\n\t\"SOCKET_SOL_UDP\":    17,\n}\n\nfunc (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel {\n\tp := new(SocketOption_SocketOptionLevel)\n\t*p = x\n\treturn p\n}\nfunc (x SocketOption_SocketOptionLevel) String() string {\n\treturn proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x))\n}\nfunc (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, \"SocketOption_SocketOptionLevel\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = SocketOption_SocketOptionLevel(value)\n\treturn nil\n}\n\ntype SocketOption_SocketOptionName int32\n\nconst (\n\tSocketOption_SOCKET_SO_DEBUG         SocketOption_SocketOptionName = 1\n\tSocketOption_SOCKET_SO_REUSEADDR     SocketOption_SocketOptionName = 2\n\tSocketOption_SOCKET_SO_TYPE          SocketOption_SocketOptionName = 3\n\tSocketOption_SOCKET_SO_ERROR         SocketOption_SocketOptionName = 4\n\tSocketOption_SOCKET_SO_DONTROUTE     SocketOption_SocketOptionName = 5\n\tSocketOption_SOCKET_SO_BROADCAST     SocketOption_SocketOptionName = 6\n\tSocketOption_SOCKET_SO_SNDBUF        SocketOption_SocketOptionName = 7\n\tSocketOption_SOCKET_SO_RCVBUF        SocketOption_SocketOptionName = 8\n\tSocketOption_SOCKET_SO_KEEPALIVE     SocketOption_SocketOptionName = 9\n\tSocketOption_SOCKET_SO_OOBINLINE     SocketOption_SocketOptionName = 10\n\tSocketOption_SOCKET_SO_LINGER        SocketOption_SocketOptionName = 13\n\tSocketOption_SOCKET_SO_RCVTIMEO      SocketOption_SocketOptionName = 
20\n\tSocketOption_SOCKET_SO_SNDTIMEO      SocketOption_SocketOptionName = 21\n\tSocketOption_SOCKET_IP_TOS           SocketOption_SocketOptionName = 1\n\tSocketOption_SOCKET_IP_TTL           SocketOption_SocketOptionName = 2\n\tSocketOption_SOCKET_IP_HDRINCL       SocketOption_SocketOptionName = 3\n\tSocketOption_SOCKET_IP_OPTIONS       SocketOption_SocketOptionName = 4\n\tSocketOption_SOCKET_TCP_NODELAY      SocketOption_SocketOptionName = 1\n\tSocketOption_SOCKET_TCP_MAXSEG       SocketOption_SocketOptionName = 2\n\tSocketOption_SOCKET_TCP_CORK         SocketOption_SocketOptionName = 3\n\tSocketOption_SOCKET_TCP_KEEPIDLE     SocketOption_SocketOptionName = 4\n\tSocketOption_SOCKET_TCP_KEEPINTVL    SocketOption_SocketOptionName = 5\n\tSocketOption_SOCKET_TCP_KEEPCNT      SocketOption_SocketOptionName = 6\n\tSocketOption_SOCKET_TCP_SYNCNT       SocketOption_SocketOptionName = 7\n\tSocketOption_SOCKET_TCP_LINGER2      SocketOption_SocketOptionName = 8\n\tSocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9\n\tSocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10\n\tSocketOption_SOCKET_TCP_INFO         SocketOption_SocketOptionName = 11\n\tSocketOption_SOCKET_TCP_QUICKACK     SocketOption_SocketOptionName = 12\n)\n\nvar SocketOption_SocketOptionName_name = map[int32]string{\n\t1:  \"SOCKET_SO_DEBUG\",\n\t2:  \"SOCKET_SO_REUSEADDR\",\n\t3:  \"SOCKET_SO_TYPE\",\n\t4:  \"SOCKET_SO_ERROR\",\n\t5:  \"SOCKET_SO_DONTROUTE\",\n\t6:  \"SOCKET_SO_BROADCAST\",\n\t7:  \"SOCKET_SO_SNDBUF\",\n\t8:  \"SOCKET_SO_RCVBUF\",\n\t9:  \"SOCKET_SO_KEEPALIVE\",\n\t10: \"SOCKET_SO_OOBINLINE\",\n\t13: \"SOCKET_SO_LINGER\",\n\t20: \"SOCKET_SO_RCVTIMEO\",\n\t21: \"SOCKET_SO_SNDTIMEO\",\n\t// Duplicate value: 1: \"SOCKET_IP_TOS\",\n\t// Duplicate value: 2: \"SOCKET_IP_TTL\",\n\t// Duplicate value: 3: \"SOCKET_IP_HDRINCL\",\n\t// Duplicate value: 4: \"SOCKET_IP_OPTIONS\",\n\t// Duplicate value: 1: \"SOCKET_TCP_NODELAY\",\n\t// Duplicate value: 2: 
\"SOCKET_TCP_MAXSEG\",\n\t// Duplicate value: 3: \"SOCKET_TCP_CORK\",\n\t// Duplicate value: 4: \"SOCKET_TCP_KEEPIDLE\",\n\t// Duplicate value: 5: \"SOCKET_TCP_KEEPINTVL\",\n\t// Duplicate value: 6: \"SOCKET_TCP_KEEPCNT\",\n\t// Duplicate value: 7: \"SOCKET_TCP_SYNCNT\",\n\t// Duplicate value: 8: \"SOCKET_TCP_LINGER2\",\n\t// Duplicate value: 9: \"SOCKET_TCP_DEFER_ACCEPT\",\n\t// Duplicate value: 10: \"SOCKET_TCP_WINDOW_CLAMP\",\n\t11: \"SOCKET_TCP_INFO\",\n\t12: \"SOCKET_TCP_QUICKACK\",\n}\nvar SocketOption_SocketOptionName_value = map[string]int32{\n\t\"SOCKET_SO_DEBUG\":         1,\n\t\"SOCKET_SO_REUSEADDR\":     2,\n\t\"SOCKET_SO_TYPE\":          3,\n\t\"SOCKET_SO_ERROR\":         4,\n\t\"SOCKET_SO_DONTROUTE\":     5,\n\t\"SOCKET_SO_BROADCAST\":     6,\n\t\"SOCKET_SO_SNDBUF\":        7,\n\t\"SOCKET_SO_RCVBUF\":        8,\n\t\"SOCKET_SO_KEEPALIVE\":     9,\n\t\"SOCKET_SO_OOBINLINE\":     10,\n\t\"SOCKET_SO_LINGER\":        13,\n\t\"SOCKET_SO_RCVTIMEO\":      20,\n\t\"SOCKET_SO_SNDTIMEO\":      21,\n\t\"SOCKET_IP_TOS\":           1,\n\t\"SOCKET_IP_TTL\":           2,\n\t\"SOCKET_IP_HDRINCL\":       3,\n\t\"SOCKET_IP_OPTIONS\":       4,\n\t\"SOCKET_TCP_NODELAY\":      1,\n\t\"SOCKET_TCP_MAXSEG\":       2,\n\t\"SOCKET_TCP_CORK\":         3,\n\t\"SOCKET_TCP_KEEPIDLE\":     4,\n\t\"SOCKET_TCP_KEEPINTVL\":    5,\n\t\"SOCKET_TCP_KEEPCNT\":      6,\n\t\"SOCKET_TCP_SYNCNT\":       7,\n\t\"SOCKET_TCP_LINGER2\":      8,\n\t\"SOCKET_TCP_DEFER_ACCEPT\": 9,\n\t\"SOCKET_TCP_WINDOW_CLAMP\": 10,\n\t\"SOCKET_TCP_INFO\":         11,\n\t\"SOCKET_TCP_QUICKACK\":     12,\n}\n\nfunc (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName {\n\tp := new(SocketOption_SocketOptionName)\n\t*p = x\n\treturn p\n}\nfunc (x SocketOption_SocketOptionName) String() string {\n\treturn proto.EnumName(SocketOption_SocketOptionName_name, int32(x))\n}\nfunc (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error {\n\tvalue, err := 
proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, \"SocketOption_SocketOptionName\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = SocketOption_SocketOptionName(value)\n\treturn nil\n}\n\ntype ShutDownRequest_How int32\n\nconst (\n\tShutDownRequest_SOCKET_SHUT_RD   ShutDownRequest_How = 1\n\tShutDownRequest_SOCKET_SHUT_WR   ShutDownRequest_How = 2\n\tShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3\n)\n\nvar ShutDownRequest_How_name = map[int32]string{\n\t1: \"SOCKET_SHUT_RD\",\n\t2: \"SOCKET_SHUT_WR\",\n\t3: \"SOCKET_SHUT_RDWR\",\n}\nvar ShutDownRequest_How_value = map[string]int32{\n\t\"SOCKET_SHUT_RD\":   1,\n\t\"SOCKET_SHUT_WR\":   2,\n\t\"SOCKET_SHUT_RDWR\": 3,\n}\n\nfunc (x ShutDownRequest_How) Enum() *ShutDownRequest_How {\n\tp := new(ShutDownRequest_How)\n\t*p = x\n\treturn p\n}\nfunc (x ShutDownRequest_How) String() string {\n\treturn proto.EnumName(ShutDownRequest_How_name, int32(x))\n}\nfunc (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, \"ShutDownRequest_How\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = ShutDownRequest_How(value)\n\treturn nil\n}\n\ntype ReceiveRequest_Flags int32\n\nconst (\n\tReceiveRequest_MSG_OOB  ReceiveRequest_Flags = 1\n\tReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2\n)\n\nvar ReceiveRequest_Flags_name = map[int32]string{\n\t1: \"MSG_OOB\",\n\t2: \"MSG_PEEK\",\n}\nvar ReceiveRequest_Flags_value = map[string]int32{\n\t\"MSG_OOB\":  1,\n\t\"MSG_PEEK\": 2,\n}\n\nfunc (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags {\n\tp := new(ReceiveRequest_Flags)\n\t*p = x\n\treturn p\n}\nfunc (x ReceiveRequest_Flags) String() string {\n\treturn proto.EnumName(ReceiveRequest_Flags_name, int32(x))\n}\nfunc (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, \"ReceiveRequest_Flags\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = 
ReceiveRequest_Flags(value)\n\treturn nil\n}\n\ntype PollEvent_PollEventFlag int32\n\nconst (\n\tPollEvent_SOCKET_POLLNONE   PollEvent_PollEventFlag = 0\n\tPollEvent_SOCKET_POLLIN     PollEvent_PollEventFlag = 1\n\tPollEvent_SOCKET_POLLPRI    PollEvent_PollEventFlag = 2\n\tPollEvent_SOCKET_POLLOUT    PollEvent_PollEventFlag = 4\n\tPollEvent_SOCKET_POLLERR    PollEvent_PollEventFlag = 8\n\tPollEvent_SOCKET_POLLHUP    PollEvent_PollEventFlag = 16\n\tPollEvent_SOCKET_POLLNVAL   PollEvent_PollEventFlag = 32\n\tPollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64\n\tPollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128\n\tPollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256\n\tPollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512\n\tPollEvent_SOCKET_POLLMSG    PollEvent_PollEventFlag = 1024\n\tPollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096\n\tPollEvent_SOCKET_POLLRDHUP  PollEvent_PollEventFlag = 8192\n)\n\nvar PollEvent_PollEventFlag_name = map[int32]string{\n\t0:    \"SOCKET_POLLNONE\",\n\t1:    \"SOCKET_POLLIN\",\n\t2:    \"SOCKET_POLLPRI\",\n\t4:    \"SOCKET_POLLOUT\",\n\t8:    \"SOCKET_POLLERR\",\n\t16:   \"SOCKET_POLLHUP\",\n\t32:   \"SOCKET_POLLNVAL\",\n\t64:   \"SOCKET_POLLRDNORM\",\n\t128:  \"SOCKET_POLLRDBAND\",\n\t256:  \"SOCKET_POLLWRNORM\",\n\t512:  \"SOCKET_POLLWRBAND\",\n\t1024: \"SOCKET_POLLMSG\",\n\t4096: \"SOCKET_POLLREMOVE\",\n\t8192: \"SOCKET_POLLRDHUP\",\n}\nvar PollEvent_PollEventFlag_value = map[string]int32{\n\t\"SOCKET_POLLNONE\":   0,\n\t\"SOCKET_POLLIN\":     1,\n\t\"SOCKET_POLLPRI\":    2,\n\t\"SOCKET_POLLOUT\":    4,\n\t\"SOCKET_POLLERR\":    8,\n\t\"SOCKET_POLLHUP\":    16,\n\t\"SOCKET_POLLNVAL\":   32,\n\t\"SOCKET_POLLRDNORM\": 64,\n\t\"SOCKET_POLLRDBAND\": 128,\n\t\"SOCKET_POLLWRNORM\": 256,\n\t\"SOCKET_POLLWRBAND\": 512,\n\t\"SOCKET_POLLMSG\":    1024,\n\t\"SOCKET_POLLREMOVE\": 4096,\n\t\"SOCKET_POLLRDHUP\":  8192,\n}\n\nfunc (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag {\n\tp := 
new(PollEvent_PollEventFlag)\n\t*p = x\n\treturn p\n}\nfunc (x PollEvent_PollEventFlag) String() string {\n\treturn proto.EnumName(PollEvent_PollEventFlag_name, int32(x))\n}\nfunc (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, \"PollEvent_PollEventFlag\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = PollEvent_PollEventFlag(value)\n\treturn nil\n}\n\ntype ResolveReply_ErrorCode int32\n\nconst (\n\tResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1\n\tResolveReply_SOCKET_EAI_AGAIN      ResolveReply_ErrorCode = 2\n\tResolveReply_SOCKET_EAI_BADFLAGS   ResolveReply_ErrorCode = 3\n\tResolveReply_SOCKET_EAI_FAIL       ResolveReply_ErrorCode = 4\n\tResolveReply_SOCKET_EAI_FAMILY     ResolveReply_ErrorCode = 5\n\tResolveReply_SOCKET_EAI_MEMORY     ResolveReply_ErrorCode = 6\n\tResolveReply_SOCKET_EAI_NODATA     ResolveReply_ErrorCode = 7\n\tResolveReply_SOCKET_EAI_NONAME     ResolveReply_ErrorCode = 8\n\tResolveReply_SOCKET_EAI_SERVICE    ResolveReply_ErrorCode = 9\n\tResolveReply_SOCKET_EAI_SOCKTYPE   ResolveReply_ErrorCode = 10\n\tResolveReply_SOCKET_EAI_SYSTEM     ResolveReply_ErrorCode = 11\n\tResolveReply_SOCKET_EAI_BADHINTS   ResolveReply_ErrorCode = 12\n\tResolveReply_SOCKET_EAI_PROTOCOL   ResolveReply_ErrorCode = 13\n\tResolveReply_SOCKET_EAI_OVERFLOW   ResolveReply_ErrorCode = 14\n\tResolveReply_SOCKET_EAI_MAX        ResolveReply_ErrorCode = 15\n)\n\nvar ResolveReply_ErrorCode_name = map[int32]string{\n\t1:  \"SOCKET_EAI_ADDRFAMILY\",\n\t2:  \"SOCKET_EAI_AGAIN\",\n\t3:  \"SOCKET_EAI_BADFLAGS\",\n\t4:  \"SOCKET_EAI_FAIL\",\n\t5:  \"SOCKET_EAI_FAMILY\",\n\t6:  \"SOCKET_EAI_MEMORY\",\n\t7:  \"SOCKET_EAI_NODATA\",\n\t8:  \"SOCKET_EAI_NONAME\",\n\t9:  \"SOCKET_EAI_SERVICE\",\n\t10: \"SOCKET_EAI_SOCKTYPE\",\n\t11: \"SOCKET_EAI_SYSTEM\",\n\t12: \"SOCKET_EAI_BADHINTS\",\n\t13: \"SOCKET_EAI_PROTOCOL\",\n\t14: \"SOCKET_EAI_OVERFLOW\",\n\t15: 
\"SOCKET_EAI_MAX\",\n}\nvar ResolveReply_ErrorCode_value = map[string]int32{\n\t\"SOCKET_EAI_ADDRFAMILY\": 1,\n\t\"SOCKET_EAI_AGAIN\":      2,\n\t\"SOCKET_EAI_BADFLAGS\":   3,\n\t\"SOCKET_EAI_FAIL\":       4,\n\t\"SOCKET_EAI_FAMILY\":     5,\n\t\"SOCKET_EAI_MEMORY\":     6,\n\t\"SOCKET_EAI_NODATA\":     7,\n\t\"SOCKET_EAI_NONAME\":     8,\n\t\"SOCKET_EAI_SERVICE\":    9,\n\t\"SOCKET_EAI_SOCKTYPE\":   10,\n\t\"SOCKET_EAI_SYSTEM\":     11,\n\t\"SOCKET_EAI_BADHINTS\":   12,\n\t\"SOCKET_EAI_PROTOCOL\":   13,\n\t\"SOCKET_EAI_OVERFLOW\":   14,\n\t\"SOCKET_EAI_MAX\":        15,\n}\n\nfunc (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode {\n\tp := new(ResolveReply_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x ResolveReply_ErrorCode) String() string {\n\treturn proto.EnumName(ResolveReply_ErrorCode_name, int32(x))\n}\nfunc (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, \"ResolveReply_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = ResolveReply_ErrorCode(value)\n\treturn nil\n}\n\ntype RemoteSocketServiceError struct {\n\tSystemError      *int32  `protobuf:\"varint,1,opt,name=system_error,def=0\" json:\"system_error,omitempty\"`\n\tErrorDetail      *string `protobuf:\"bytes,2,opt,name=error_detail\" json:\"error_detail,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *RemoteSocketServiceError) Reset()         { *m = RemoteSocketServiceError{} }\nfunc (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) }\nfunc (*RemoteSocketServiceError) ProtoMessage()    {}\n\nconst Default_RemoteSocketServiceError_SystemError int32 = 0\n\nfunc (m *RemoteSocketServiceError) GetSystemError() int32 {\n\tif m != nil && m.SystemError != nil {\n\t\treturn *m.SystemError\n\t}\n\treturn Default_RemoteSocketServiceError_SystemError\n}\n\nfunc (m *RemoteSocketServiceError) GetErrorDetail() string {\n\tif m != nil && m.ErrorDetail != nil 
{\n\t\treturn *m.ErrorDetail\n\t}\n\treturn \"\"\n}\n\ntype AddressPort struct {\n\tPort             *int32  `protobuf:\"varint,1,req,name=port\" json:\"port,omitempty\"`\n\tPackedAddress    []byte  `protobuf:\"bytes,2,opt,name=packed_address\" json:\"packed_address,omitempty\"`\n\tHostnameHint     *string `protobuf:\"bytes,3,opt,name=hostname_hint\" json:\"hostname_hint,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *AddressPort) Reset()         { *m = AddressPort{} }\nfunc (m *AddressPort) String() string { return proto.CompactTextString(m) }\nfunc (*AddressPort) ProtoMessage()    {}\n\nfunc (m *AddressPort) GetPort() int32 {\n\tif m != nil && m.Port != nil {\n\t\treturn *m.Port\n\t}\n\treturn 0\n}\n\nfunc (m *AddressPort) GetPackedAddress() []byte {\n\tif m != nil {\n\t\treturn m.PackedAddress\n\t}\n\treturn nil\n}\n\nfunc (m *AddressPort) GetHostnameHint() string {\n\tif m != nil && m.HostnameHint != nil {\n\t\treturn *m.HostnameHint\n\t}\n\treturn \"\"\n}\n\ntype CreateSocketRequest struct {\n\tFamily           *CreateSocketRequest_SocketFamily   `protobuf:\"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily\" json:\"family,omitempty\"`\n\tProtocol         *CreateSocketRequest_SocketProtocol `protobuf:\"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol\" json:\"protocol,omitempty\"`\n\tSocketOptions    []*SocketOption                     `protobuf:\"bytes,3,rep,name=socket_options\" json:\"socket_options,omitempty\"`\n\tProxyExternalIp  *AddressPort                        `protobuf:\"bytes,4,opt,name=proxy_external_ip\" json:\"proxy_external_ip,omitempty\"`\n\tListenBacklog    *int32                              `protobuf:\"varint,5,opt,name=listen_backlog,def=0\" json:\"listen_backlog,omitempty\"`\n\tRemoteIp         *AddressPort                        `protobuf:\"bytes,6,opt,name=remote_ip\" json:\"remote_ip,omitempty\"`\n\tAppId            *string                             
`protobuf:\"bytes,9,opt,name=app_id\" json:\"app_id,omitempty\"`\n\tProjectId        *int64                              `protobuf:\"varint,10,opt,name=project_id\" json:\"project_id,omitempty\"`\n\tXXX_unrecognized []byte                              `json:\"-\"`\n}\n\nfunc (m *CreateSocketRequest) Reset()         { *m = CreateSocketRequest{} }\nfunc (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) }\nfunc (*CreateSocketRequest) ProtoMessage()    {}\n\nconst Default_CreateSocketRequest_ListenBacklog int32 = 0\n\nfunc (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily {\n\tif m != nil && m.Family != nil {\n\t\treturn *m.Family\n\t}\n\treturn CreateSocketRequest_IPv4\n}\n\nfunc (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol {\n\tif m != nil && m.Protocol != nil {\n\t\treturn *m.Protocol\n\t}\n\treturn CreateSocketRequest_TCP\n}\n\nfunc (m *CreateSocketRequest) GetSocketOptions() []*SocketOption {\n\tif m != nil {\n\t\treturn m.SocketOptions\n\t}\n\treturn nil\n}\n\nfunc (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort {\n\tif m != nil {\n\t\treturn m.ProxyExternalIp\n\t}\n\treturn nil\n}\n\nfunc (m *CreateSocketRequest) GetListenBacklog() int32 {\n\tif m != nil && m.ListenBacklog != nil {\n\t\treturn *m.ListenBacklog\n\t}\n\treturn Default_CreateSocketRequest_ListenBacklog\n}\n\nfunc (m *CreateSocketRequest) GetRemoteIp() *AddressPort {\n\tif m != nil {\n\t\treturn m.RemoteIp\n\t}\n\treturn nil\n}\n\nfunc (m *CreateSocketRequest) GetAppId() string {\n\tif m != nil && m.AppId != nil {\n\t\treturn *m.AppId\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateSocketRequest) GetProjectId() int64 {\n\tif m != nil && m.ProjectId != nil {\n\t\treturn *m.ProjectId\n\t}\n\treturn 0\n}\n\ntype CreateSocketReply struct {\n\tSocketDescriptor *string                   `protobuf:\"bytes,1,opt,name=socket_descriptor\" json:\"socket_descriptor,omitempty\"`\n\tServerAddress    *AddressPort              
`protobuf:\"bytes,3,opt,name=server_address\" json:\"server_address,omitempty\"`\n\tProxyExternalIp  *AddressPort              `protobuf:\"bytes,4,opt,name=proxy_external_ip\" json:\"proxy_external_ip,omitempty\"`\n\tXXX_extensions   map[int32]proto.Extension `json:\"-\"`\n\tXXX_unrecognized []byte                    `json:\"-\"`\n}\n\nfunc (m *CreateSocketReply) Reset()         { *m = CreateSocketReply{} }\nfunc (m *CreateSocketReply) String() string { return proto.CompactTextString(m) }\nfunc (*CreateSocketReply) ProtoMessage()    {}\n\nvar extRange_CreateSocketReply = []proto.ExtensionRange{\n\t{1000, 536870911},\n}\n\nfunc (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange {\n\treturn extRange_CreateSocketReply\n}\nfunc (m *CreateSocketReply) ExtensionMap() map[int32]proto.Extension {\n\tif m.XXX_extensions == nil {\n\t\tm.XXX_extensions = make(map[int32]proto.Extension)\n\t}\n\treturn m.XXX_extensions\n}\n\nfunc (m *CreateSocketReply) GetSocketDescriptor() string {\n\tif m != nil && m.SocketDescriptor != nil {\n\t\treturn *m.SocketDescriptor\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateSocketReply) GetServerAddress() *AddressPort {\n\tif m != nil {\n\t\treturn m.ServerAddress\n\t}\n\treturn nil\n}\n\nfunc (m *CreateSocketReply) GetProxyExternalIp() *AddressPort {\n\tif m != nil {\n\t\treturn m.ProxyExternalIp\n\t}\n\treturn nil\n}\n\ntype BindRequest struct {\n\tSocketDescriptor *string      `protobuf:\"bytes,1,req,name=socket_descriptor\" json:\"socket_descriptor,omitempty\"`\n\tProxyExternalIp  *AddressPort `protobuf:\"bytes,2,req,name=proxy_external_ip\" json:\"proxy_external_ip,omitempty\"`\n\tXXX_unrecognized []byte       `json:\"-\"`\n}\n\nfunc (m *BindRequest) Reset()         { *m = BindRequest{} }\nfunc (m *BindRequest) String() string { return proto.CompactTextString(m) }\nfunc (*BindRequest) ProtoMessage()    {}\n\nfunc (m *BindRequest) GetSocketDescriptor() string {\n\tif m != nil && m.SocketDescriptor != nil {\n\t\treturn 
*m.SocketDescriptor\n\t}\n\treturn \"\"\n}\n\nfunc (m *BindRequest) GetProxyExternalIp() *AddressPort {\n\tif m != nil {\n\t\treturn m.ProxyExternalIp\n\t}\n\treturn nil\n}\n\ntype BindReply struct {\n\tProxyExternalIp  *AddressPort `protobuf:\"bytes,1,opt,name=proxy_external_ip\" json:\"proxy_external_ip,omitempty\"`\n\tXXX_unrecognized []byte       `json:\"-\"`\n}\n\nfunc (m *BindReply) Reset()         { *m = BindReply{} }\nfunc (m *BindReply) String() string { return proto.CompactTextString(m) }\nfunc (*BindReply) ProtoMessage()    {}\n\nfunc (m *BindReply) GetProxyExternalIp() *AddressPort {\n\tif m != nil {\n\t\treturn m.ProxyExternalIp\n\t}\n\treturn nil\n}\n\ntype GetSocketNameRequest struct {\n\tSocketDescriptor *string `protobuf:\"bytes,1,req,name=socket_descriptor\" json:\"socket_descriptor,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *GetSocketNameRequest) Reset()         { *m = GetSocketNameRequest{} }\nfunc (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetSocketNameRequest) ProtoMessage()    {}\n\nfunc (m *GetSocketNameRequest) GetSocketDescriptor() string {\n\tif m != nil && m.SocketDescriptor != nil {\n\t\treturn *m.SocketDescriptor\n\t}\n\treturn \"\"\n}\n\ntype GetSocketNameReply struct {\n\tProxyExternalIp  *AddressPort `protobuf:\"bytes,2,opt,name=proxy_external_ip\" json:\"proxy_external_ip,omitempty\"`\n\tXXX_unrecognized []byte       `json:\"-\"`\n}\n\nfunc (m *GetSocketNameReply) Reset()         { *m = GetSocketNameReply{} }\nfunc (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) }\nfunc (*GetSocketNameReply) ProtoMessage()    {}\n\nfunc (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort {\n\tif m != nil {\n\t\treturn m.ProxyExternalIp\n\t}\n\treturn nil\n}\n\ntype GetPeerNameRequest struct {\n\tSocketDescriptor *string `protobuf:\"bytes,1,req,name=socket_descriptor\" json:\"socket_descriptor,omitempty\"`\n\tXXX_unrecognized []byte  
`json:\"-\"`\n}\n\nfunc (m *GetPeerNameRequest) Reset()         { *m = GetPeerNameRequest{} }\nfunc (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetPeerNameRequest) ProtoMessage()    {}\n\nfunc (m *GetPeerNameRequest) GetSocketDescriptor() string {\n\tif m != nil && m.SocketDescriptor != nil {\n\t\treturn *m.SocketDescriptor\n\t}\n\treturn \"\"\n}\n\ntype GetPeerNameReply struct {\n\tPeerIp           *AddressPort `protobuf:\"bytes,2,opt,name=peer_ip\" json:\"peer_ip,omitempty\"`\n\tXXX_unrecognized []byte       `json:\"-\"`\n}\n\nfunc (m *GetPeerNameReply) Reset()         { *m = GetPeerNameReply{} }\nfunc (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) }\nfunc (*GetPeerNameReply) ProtoMessage()    {}\n\nfunc (m *GetPeerNameReply) GetPeerIp() *AddressPort {\n\tif m != nil {\n\t\treturn m.PeerIp\n\t}\n\treturn nil\n}\n\ntype SocketOption struct {\n\tLevel            *SocketOption_SocketOptionLevel `protobuf:\"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel\" json:\"level,omitempty\"`\n\tOption           *SocketOption_SocketOptionName  `protobuf:\"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName\" json:\"option,omitempty\"`\n\tValue            []byte                          `protobuf:\"bytes,3,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte                          `json:\"-\"`\n}\n\nfunc (m *SocketOption) Reset()         { *m = SocketOption{} }\nfunc (m *SocketOption) String() string { return proto.CompactTextString(m) }\nfunc (*SocketOption) ProtoMessage()    {}\n\nfunc (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel {\n\tif m != nil && m.Level != nil {\n\t\treturn *m.Level\n\t}\n\treturn SocketOption_SOCKET_SOL_IP\n}\n\nfunc (m *SocketOption) GetOption() SocketOption_SocketOptionName {\n\tif m != nil && m.Option != nil {\n\t\treturn *m.Option\n\t}\n\treturn SocketOption_SOCKET_SO_DEBUG\n}\n\nfunc (m *SocketOption) 
GetValue() []byte {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\ntype SetSocketOptionsRequest struct {\n\tSocketDescriptor *string         `protobuf:\"bytes,1,req,name=socket_descriptor\" json:\"socket_descriptor,omitempty\"`\n\tOptions          []*SocketOption `protobuf:\"bytes,2,rep,name=options\" json:\"options,omitempty\"`\n\tXXX_unrecognized []byte          `json:\"-\"`\n}\n\nfunc (m *SetSocketOptionsRequest) Reset()         { *m = SetSocketOptionsRequest{} }\nfunc (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) }\nfunc (*SetSocketOptionsRequest) ProtoMessage()    {}\n\nfunc (m *SetSocketOptionsRequest) GetSocketDescriptor() string {\n\tif m != nil && m.SocketDescriptor != nil {\n\t\treturn *m.SocketDescriptor\n\t}\n\treturn \"\"\n}\n\nfunc (m *SetSocketOptionsRequest) GetOptions() []*SocketOption {\n\tif m != nil {\n\t\treturn m.Options\n\t}\n\treturn nil\n}\n\ntype SetSocketOptionsReply struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *SetSocketOptionsReply) Reset()         { *m = SetSocketOptionsReply{} }\nfunc (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) }\nfunc (*SetSocketOptionsReply) ProtoMessage()    {}\n\ntype GetSocketOptionsRequest struct {\n\tSocketDescriptor *string         `protobuf:\"bytes,1,req,name=socket_descriptor\" json:\"socket_descriptor,omitempty\"`\n\tOptions          []*SocketOption `protobuf:\"bytes,2,rep,name=options\" json:\"options,omitempty\"`\n\tXXX_unrecognized []byte          `json:\"-\"`\n}\n\nfunc (m *GetSocketOptionsRequest) Reset()         { *m = GetSocketOptionsRequest{} }\nfunc (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetSocketOptionsRequest) ProtoMessage()    {}\n\nfunc (m *GetSocketOptionsRequest) GetSocketDescriptor() string {\n\tif m != nil && m.SocketDescriptor != nil {\n\t\treturn *m.SocketDescriptor\n\t}\n\treturn \"\"\n}\n\nfunc (m *GetSocketOptionsRequest) 
GetOptions() []*SocketOption {\n\tif m != nil {\n\t\treturn m.Options\n\t}\n\treturn nil\n}\n\ntype GetSocketOptionsReply struct {\n\tOptions          []*SocketOption `protobuf:\"bytes,2,rep,name=options\" json:\"options,omitempty\"`\n\tXXX_unrecognized []byte          `json:\"-\"`\n}\n\nfunc (m *GetSocketOptionsReply) Reset()         { *m = GetSocketOptionsReply{} }\nfunc (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) }\nfunc (*GetSocketOptionsReply) ProtoMessage()    {}\n\nfunc (m *GetSocketOptionsReply) GetOptions() []*SocketOption {\n\tif m != nil {\n\t\treturn m.Options\n\t}\n\treturn nil\n}\n\ntype ConnectRequest struct {\n\tSocketDescriptor *string      `protobuf:\"bytes,1,req,name=socket_descriptor\" json:\"socket_descriptor,omitempty\"`\n\tRemoteIp         *AddressPort `protobuf:\"bytes,2,req,name=remote_ip\" json:\"remote_ip,omitempty\"`\n\tTimeoutSeconds   *float64     `protobuf:\"fixed64,3,opt,name=timeout_seconds,def=-1\" json:\"timeout_seconds,omitempty\"`\n\tXXX_unrecognized []byte       `json:\"-\"`\n}\n\nfunc (m *ConnectRequest) Reset()         { *m = ConnectRequest{} }\nfunc (m *ConnectRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ConnectRequest) ProtoMessage()    {}\n\nconst Default_ConnectRequest_TimeoutSeconds float64 = -1\n\nfunc (m *ConnectRequest) GetSocketDescriptor() string {\n\tif m != nil && m.SocketDescriptor != nil {\n\t\treturn *m.SocketDescriptor\n\t}\n\treturn \"\"\n}\n\nfunc (m *ConnectRequest) GetRemoteIp() *AddressPort {\n\tif m != nil {\n\t\treturn m.RemoteIp\n\t}\n\treturn nil\n}\n\nfunc (m *ConnectRequest) GetTimeoutSeconds() float64 {\n\tif m != nil && m.TimeoutSeconds != nil {\n\t\treturn *m.TimeoutSeconds\n\t}\n\treturn Default_ConnectRequest_TimeoutSeconds\n}\n\ntype ConnectReply struct {\n\tProxyExternalIp  *AddressPort              `protobuf:\"bytes,1,opt,name=proxy_external_ip\" json:\"proxy_external_ip,omitempty\"`\n\tXXX_extensions   map[int32]proto.Extension 
`json:\"-\"`\n\tXXX_unrecognized []byte                    `json:\"-\"`\n}\n\nfunc (m *ConnectReply) Reset()         { *m = ConnectReply{} }\nfunc (m *ConnectReply) String() string { return proto.CompactTextString(m) }\nfunc (*ConnectReply) ProtoMessage()    {}\n\nvar extRange_ConnectReply = []proto.ExtensionRange{\n\t{1000, 536870911},\n}\n\nfunc (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange {\n\treturn extRange_ConnectReply\n}\nfunc (m *ConnectReply) ExtensionMap() map[int32]proto.Extension {\n\tif m.XXX_extensions == nil {\n\t\tm.XXX_extensions = make(map[int32]proto.Extension)\n\t}\n\treturn m.XXX_extensions\n}\n\nfunc (m *ConnectReply) GetProxyExternalIp() *AddressPort {\n\tif m != nil {\n\t\treturn m.ProxyExternalIp\n\t}\n\treturn nil\n}\n\ntype ListenRequest struct {\n\tSocketDescriptor *string `protobuf:\"bytes,1,req,name=socket_descriptor\" json:\"socket_descriptor,omitempty\"`\n\tBacklog          *int32  `protobuf:\"varint,2,req,name=backlog\" json:\"backlog,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *ListenRequest) Reset()         { *m = ListenRequest{} }\nfunc (m *ListenRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ListenRequest) ProtoMessage()    {}\n\nfunc (m *ListenRequest) GetSocketDescriptor() string {\n\tif m != nil && m.SocketDescriptor != nil {\n\t\treturn *m.SocketDescriptor\n\t}\n\treturn \"\"\n}\n\nfunc (m *ListenRequest) GetBacklog() int32 {\n\tif m != nil && m.Backlog != nil {\n\t\treturn *m.Backlog\n\t}\n\treturn 0\n}\n\ntype ListenReply struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *ListenReply) Reset()         { *m = ListenReply{} }\nfunc (m *ListenReply) String() string { return proto.CompactTextString(m) }\nfunc (*ListenReply) ProtoMessage()    {}\n\ntype AcceptRequest struct {\n\tSocketDescriptor *string  `protobuf:\"bytes,1,req,name=socket_descriptor\" json:\"socket_descriptor,omitempty\"`\n\tTimeoutSeconds   *float64 
`protobuf:\"fixed64,2,opt,name=timeout_seconds,def=-1\" json:\"timeout_seconds,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *AcceptRequest) Reset()         { *m = AcceptRequest{} }\nfunc (m *AcceptRequest) String() string { return proto.CompactTextString(m) }\nfunc (*AcceptRequest) ProtoMessage()    {}\n\nconst Default_AcceptRequest_TimeoutSeconds float64 = -1\n\nfunc (m *AcceptRequest) GetSocketDescriptor() string {\n\tif m != nil && m.SocketDescriptor != nil {\n\t\treturn *m.SocketDescriptor\n\t}\n\treturn \"\"\n}\n\nfunc (m *AcceptRequest) GetTimeoutSeconds() float64 {\n\tif m != nil && m.TimeoutSeconds != nil {\n\t\treturn *m.TimeoutSeconds\n\t}\n\treturn Default_AcceptRequest_TimeoutSeconds\n}\n\ntype AcceptReply struct {\n\tNewSocketDescriptor []byte       `protobuf:\"bytes,2,opt,name=new_socket_descriptor\" json:\"new_socket_descriptor,omitempty\"`\n\tRemoteAddress       *AddressPort `protobuf:\"bytes,3,opt,name=remote_address\" json:\"remote_address,omitempty\"`\n\tXXX_unrecognized    []byte       `json:\"-\"`\n}\n\nfunc (m *AcceptReply) Reset()         { *m = AcceptReply{} }\nfunc (m *AcceptReply) String() string { return proto.CompactTextString(m) }\nfunc (*AcceptReply) ProtoMessage()    {}\n\nfunc (m *AcceptReply) GetNewSocketDescriptor() []byte {\n\tif m != nil {\n\t\treturn m.NewSocketDescriptor\n\t}\n\treturn nil\n}\n\nfunc (m *AcceptReply) GetRemoteAddress() *AddressPort {\n\tif m != nil {\n\t\treturn m.RemoteAddress\n\t}\n\treturn nil\n}\n\ntype ShutDownRequest struct {\n\tSocketDescriptor *string              `protobuf:\"bytes,1,req,name=socket_descriptor\" json:\"socket_descriptor,omitempty\"`\n\tHow              *ShutDownRequest_How `protobuf:\"varint,2,req,name=how,enum=appengine.ShutDownRequest_How\" json:\"how,omitempty\"`\n\tSendOffset       *int64               `protobuf:\"varint,3,req,name=send_offset\" json:\"send_offset,omitempty\"`\n\tXXX_unrecognized []byte               `json:\"-\"`\n}\n\nfunc (m 
*ShutDownRequest) Reset()         { *m = ShutDownRequest{} }\nfunc (m *ShutDownRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ShutDownRequest) ProtoMessage()    {}\n\nfunc (m *ShutDownRequest) GetSocketDescriptor() string {\n\tif m != nil && m.SocketDescriptor != nil {\n\t\treturn *m.SocketDescriptor\n\t}\n\treturn \"\"\n}\n\nfunc (m *ShutDownRequest) GetHow() ShutDownRequest_How {\n\tif m != nil && m.How != nil {\n\t\treturn *m.How\n\t}\n\treturn ShutDownRequest_SOCKET_SHUT_RD\n}\n\nfunc (m *ShutDownRequest) GetSendOffset() int64 {\n\tif m != nil && m.SendOffset != nil {\n\t\treturn *m.SendOffset\n\t}\n\treturn 0\n}\n\ntype ShutDownReply struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *ShutDownReply) Reset()         { *m = ShutDownReply{} }\nfunc (m *ShutDownReply) String() string { return proto.CompactTextString(m) }\nfunc (*ShutDownReply) ProtoMessage()    {}\n\ntype CloseRequest struct {\n\tSocketDescriptor *string `protobuf:\"bytes,1,req,name=socket_descriptor\" json:\"socket_descriptor,omitempty\"`\n\tSendOffset       *int64  `protobuf:\"varint,2,opt,name=send_offset,def=-1\" json:\"send_offset,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CloseRequest) Reset()         { *m = CloseRequest{} }\nfunc (m *CloseRequest) String() string { return proto.CompactTextString(m) }\nfunc (*CloseRequest) ProtoMessage()    {}\n\nconst Default_CloseRequest_SendOffset int64 = -1\n\nfunc (m *CloseRequest) GetSocketDescriptor() string {\n\tif m != nil && m.SocketDescriptor != nil {\n\t\treturn *m.SocketDescriptor\n\t}\n\treturn \"\"\n}\n\nfunc (m *CloseRequest) GetSendOffset() int64 {\n\tif m != nil && m.SendOffset != nil {\n\t\treturn *m.SendOffset\n\t}\n\treturn Default_CloseRequest_SendOffset\n}\n\ntype CloseReply struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *CloseReply) Reset()         { *m = CloseReply{} }\nfunc (m *CloseReply) String() string { return proto.CompactTextString(m) }\nfunc 
(*CloseReply) ProtoMessage()    {}\n\ntype SendRequest struct {\n\tSocketDescriptor *string      `protobuf:\"bytes,1,req,name=socket_descriptor\" json:\"socket_descriptor,omitempty\"`\n\tData             []byte       `protobuf:\"bytes,2,req,name=data\" json:\"data,omitempty\"`\n\tStreamOffset     *int64       `protobuf:\"varint,3,req,name=stream_offset\" json:\"stream_offset,omitempty\"`\n\tFlags            *int32       `protobuf:\"varint,4,opt,name=flags,def=0\" json:\"flags,omitempty\"`\n\tSendTo           *AddressPort `protobuf:\"bytes,5,opt,name=send_to\" json:\"send_to,omitempty\"`\n\tTimeoutSeconds   *float64     `protobuf:\"fixed64,6,opt,name=timeout_seconds,def=-1\" json:\"timeout_seconds,omitempty\"`\n\tXXX_unrecognized []byte       `json:\"-\"`\n}\n\nfunc (m *SendRequest) Reset()         { *m = SendRequest{} }\nfunc (m *SendRequest) String() string { return proto.CompactTextString(m) }\nfunc (*SendRequest) ProtoMessage()    {}\n\nconst Default_SendRequest_Flags int32 = 0\nconst Default_SendRequest_TimeoutSeconds float64 = -1\n\nfunc (m *SendRequest) GetSocketDescriptor() string {\n\tif m != nil && m.SocketDescriptor != nil {\n\t\treturn *m.SocketDescriptor\n\t}\n\treturn \"\"\n}\n\nfunc (m *SendRequest) GetData() []byte {\n\tif m != nil {\n\t\treturn m.Data\n\t}\n\treturn nil\n}\n\nfunc (m *SendRequest) GetStreamOffset() int64 {\n\tif m != nil && m.StreamOffset != nil {\n\t\treturn *m.StreamOffset\n\t}\n\treturn 0\n}\n\nfunc (m *SendRequest) GetFlags() int32 {\n\tif m != nil && m.Flags != nil {\n\t\treturn *m.Flags\n\t}\n\treturn Default_SendRequest_Flags\n}\n\nfunc (m *SendRequest) GetSendTo() *AddressPort {\n\tif m != nil {\n\t\treturn m.SendTo\n\t}\n\treturn nil\n}\n\nfunc (m *SendRequest) GetTimeoutSeconds() float64 {\n\tif m != nil && m.TimeoutSeconds != nil {\n\t\treturn *m.TimeoutSeconds\n\t}\n\treturn Default_SendRequest_TimeoutSeconds\n}\n\ntype SendReply struct {\n\tDataSent         *int32 `protobuf:\"varint,1,opt,name=data_sent\" 
json:\"data_sent,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *SendReply) Reset()         { *m = SendReply{} }\nfunc (m *SendReply) String() string { return proto.CompactTextString(m) }\nfunc (*SendReply) ProtoMessage()    {}\n\nfunc (m *SendReply) GetDataSent() int32 {\n\tif m != nil && m.DataSent != nil {\n\t\treturn *m.DataSent\n\t}\n\treturn 0\n}\n\ntype ReceiveRequest struct {\n\tSocketDescriptor *string  `protobuf:\"bytes,1,req,name=socket_descriptor\" json:\"socket_descriptor,omitempty\"`\n\tDataSize         *int32   `protobuf:\"varint,2,req,name=data_size\" json:\"data_size,omitempty\"`\n\tFlags            *int32   `protobuf:\"varint,3,opt,name=flags,def=0\" json:\"flags,omitempty\"`\n\tTimeoutSeconds   *float64 `protobuf:\"fixed64,5,opt,name=timeout_seconds,def=-1\" json:\"timeout_seconds,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *ReceiveRequest) Reset()         { *m = ReceiveRequest{} }\nfunc (m *ReceiveRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ReceiveRequest) ProtoMessage()    {}\n\nconst Default_ReceiveRequest_Flags int32 = 0\nconst Default_ReceiveRequest_TimeoutSeconds float64 = -1\n\nfunc (m *ReceiveRequest) GetSocketDescriptor() string {\n\tif m != nil && m.SocketDescriptor != nil {\n\t\treturn *m.SocketDescriptor\n\t}\n\treturn \"\"\n}\n\nfunc (m *ReceiveRequest) GetDataSize() int32 {\n\tif m != nil && m.DataSize != nil {\n\t\treturn *m.DataSize\n\t}\n\treturn 0\n}\n\nfunc (m *ReceiveRequest) GetFlags() int32 {\n\tif m != nil && m.Flags != nil {\n\t\treturn *m.Flags\n\t}\n\treturn Default_ReceiveRequest_Flags\n}\n\nfunc (m *ReceiveRequest) GetTimeoutSeconds() float64 {\n\tif m != nil && m.TimeoutSeconds != nil {\n\t\treturn *m.TimeoutSeconds\n\t}\n\treturn Default_ReceiveRequest_TimeoutSeconds\n}\n\ntype ReceiveReply struct {\n\tStreamOffset     *int64       `protobuf:\"varint,2,opt,name=stream_offset\" json:\"stream_offset,omitempty\"`\n\tData             []byte       
`protobuf:\"bytes,3,opt,name=data\" json:\"data,omitempty\"`\n\tReceivedFrom     *AddressPort `protobuf:\"bytes,4,opt,name=received_from\" json:\"received_from,omitempty\"`\n\tBufferSize       *int32       `protobuf:\"varint,5,opt,name=buffer_size\" json:\"buffer_size,omitempty\"`\n\tXXX_unrecognized []byte       `json:\"-\"`\n}\n\nfunc (m *ReceiveReply) Reset()         { *m = ReceiveReply{} }\nfunc (m *ReceiveReply) String() string { return proto.CompactTextString(m) }\nfunc (*ReceiveReply) ProtoMessage()    {}\n\nfunc (m *ReceiveReply) GetStreamOffset() int64 {\n\tif m != nil && m.StreamOffset != nil {\n\t\treturn *m.StreamOffset\n\t}\n\treturn 0\n}\n\nfunc (m *ReceiveReply) GetData() []byte {\n\tif m != nil {\n\t\treturn m.Data\n\t}\n\treturn nil\n}\n\nfunc (m *ReceiveReply) GetReceivedFrom() *AddressPort {\n\tif m != nil {\n\t\treturn m.ReceivedFrom\n\t}\n\treturn nil\n}\n\nfunc (m *ReceiveReply) GetBufferSize() int32 {\n\tif m != nil && m.BufferSize != nil {\n\t\treturn *m.BufferSize\n\t}\n\treturn 0\n}\n\ntype PollEvent struct {\n\tSocketDescriptor *string `protobuf:\"bytes,1,req,name=socket_descriptor\" json:\"socket_descriptor,omitempty\"`\n\tRequestedEvents  *int32  `protobuf:\"varint,2,req,name=requested_events\" json:\"requested_events,omitempty\"`\n\tObservedEvents   *int32  `protobuf:\"varint,3,req,name=observed_events\" json:\"observed_events,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *PollEvent) Reset()         { *m = PollEvent{} }\nfunc (m *PollEvent) String() string { return proto.CompactTextString(m) }\nfunc (*PollEvent) ProtoMessage()    {}\n\nfunc (m *PollEvent) GetSocketDescriptor() string {\n\tif m != nil && m.SocketDescriptor != nil {\n\t\treturn *m.SocketDescriptor\n\t}\n\treturn \"\"\n}\n\nfunc (m *PollEvent) GetRequestedEvents() int32 {\n\tif m != nil && m.RequestedEvents != nil {\n\t\treturn *m.RequestedEvents\n\t}\n\treturn 0\n}\n\nfunc (m *PollEvent) GetObservedEvents() int32 {\n\tif m != nil && m.ObservedEvents 
!= nil {\n\t\treturn *m.ObservedEvents\n\t}\n\treturn 0\n}\n\ntype PollRequest struct {\n\tEvents           []*PollEvent `protobuf:\"bytes,1,rep,name=events\" json:\"events,omitempty\"`\n\tTimeoutSeconds   *float64     `protobuf:\"fixed64,2,opt,name=timeout_seconds,def=-1\" json:\"timeout_seconds,omitempty\"`\n\tXXX_unrecognized []byte       `json:\"-\"`\n}\n\nfunc (m *PollRequest) Reset()         { *m = PollRequest{} }\nfunc (m *PollRequest) String() string { return proto.CompactTextString(m) }\nfunc (*PollRequest) ProtoMessage()    {}\n\nconst Default_PollRequest_TimeoutSeconds float64 = -1\n\nfunc (m *PollRequest) GetEvents() []*PollEvent {\n\tif m != nil {\n\t\treturn m.Events\n\t}\n\treturn nil\n}\n\nfunc (m *PollRequest) GetTimeoutSeconds() float64 {\n\tif m != nil && m.TimeoutSeconds != nil {\n\t\treturn *m.TimeoutSeconds\n\t}\n\treturn Default_PollRequest_TimeoutSeconds\n}\n\ntype PollReply struct {\n\tEvents           []*PollEvent `protobuf:\"bytes,2,rep,name=events\" json:\"events,omitempty\"`\n\tXXX_unrecognized []byte       `json:\"-\"`\n}\n\nfunc (m *PollReply) Reset()         { *m = PollReply{} }\nfunc (m *PollReply) String() string { return proto.CompactTextString(m) }\nfunc (*PollReply) ProtoMessage()    {}\n\nfunc (m *PollReply) GetEvents() []*PollEvent {\n\tif m != nil {\n\t\treturn m.Events\n\t}\n\treturn nil\n}\n\ntype ResolveRequest struct {\n\tName             *string                            `protobuf:\"bytes,1,req,name=name\" json:\"name,omitempty\"`\n\tAddressFamilies  []CreateSocketRequest_SocketFamily `protobuf:\"varint,2,rep,name=address_families,enum=appengine.CreateSocketRequest_SocketFamily\" json:\"address_families,omitempty\"`\n\tXXX_unrecognized []byte                             `json:\"-\"`\n}\n\nfunc (m *ResolveRequest) Reset()         { *m = ResolveRequest{} }\nfunc (m *ResolveRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ResolveRequest) ProtoMessage()    {}\n\nfunc (m *ResolveRequest) GetName() 
string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily {\n\tif m != nil {\n\t\treturn m.AddressFamilies\n\t}\n\treturn nil\n}\n\ntype ResolveReply struct {\n\tPackedAddress    [][]byte `protobuf:\"bytes,2,rep,name=packed_address\" json:\"packed_address,omitempty\"`\n\tCanonicalName    *string  `protobuf:\"bytes,3,opt,name=canonical_name\" json:\"canonical_name,omitempty\"`\n\tAliases          []string `protobuf:\"bytes,4,rep,name=aliases\" json:\"aliases,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *ResolveReply) Reset()         { *m = ResolveReply{} }\nfunc (m *ResolveReply) String() string { return proto.CompactTextString(m) }\nfunc (*ResolveReply) ProtoMessage()    {}\n\nfunc (m *ResolveReply) GetPackedAddress() [][]byte {\n\tif m != nil {\n\t\treturn m.PackedAddress\n\t}\n\treturn nil\n}\n\nfunc (m *ResolveReply) GetCanonicalName() string {\n\tif m != nil && m.CanonicalName != nil {\n\t\treturn *m.CanonicalName\n\t}\n\treturn \"\"\n}\n\nfunc (m *ResolveReply) GetAliases() []string {\n\tif m != nil {\n\t\treturn m.Aliases\n\t}\n\treturn nil\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/socket/socket_service.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"socket\";\n\npackage appengine;\n\nmessage RemoteSocketServiceError {\n  enum ErrorCode {\n    SYSTEM_ERROR = 1;\n    GAI_ERROR = 2;\n    FAILURE = 4;\n    PERMISSION_DENIED = 5;\n    INVALID_REQUEST = 6;\n    SOCKET_CLOSED = 7;\n  }\n\n  enum SystemError {\n    option allow_alias = true;\n\n    SYS_SUCCESS = 0;\n    SYS_EPERM = 1;\n    SYS_ENOENT = 2;\n    SYS_ESRCH = 3;\n    SYS_EINTR = 4;\n    SYS_EIO = 5;\n    SYS_ENXIO = 6;\n    SYS_E2BIG = 7;\n    SYS_ENOEXEC = 8;\n    SYS_EBADF = 9;\n    SYS_ECHILD = 10;\n    SYS_EAGAIN = 11;\n    SYS_EWOULDBLOCK = 11;\n    SYS_ENOMEM = 12;\n    SYS_EACCES = 13;\n    SYS_EFAULT = 14;\n    SYS_ENOTBLK = 15;\n    SYS_EBUSY = 16;\n    SYS_EEXIST = 17;\n    SYS_EXDEV = 18;\n    SYS_ENODEV = 19;\n    SYS_ENOTDIR = 20;\n    SYS_EISDIR = 21;\n    SYS_EINVAL = 22;\n    SYS_ENFILE = 23;\n    SYS_EMFILE = 24;\n    SYS_ENOTTY = 25;\n    SYS_ETXTBSY = 26;\n    SYS_EFBIG = 27;\n    SYS_ENOSPC = 28;\n    SYS_ESPIPE = 29;\n    SYS_EROFS = 30;\n    SYS_EMLINK = 31;\n    SYS_EPIPE = 32;\n    SYS_EDOM = 33;\n    SYS_ERANGE = 34;\n    SYS_EDEADLK = 35;\n    SYS_EDEADLOCK = 35;\n    SYS_ENAMETOOLONG = 36;\n    SYS_ENOLCK = 37;\n    SYS_ENOSYS = 38;\n    SYS_ENOTEMPTY = 39;\n    SYS_ELOOP = 40;\n    SYS_ENOMSG = 42;\n    SYS_EIDRM = 43;\n    SYS_ECHRNG = 44;\n    SYS_EL2NSYNC = 45;\n    SYS_EL3HLT = 46;\n    SYS_EL3RST = 47;\n    SYS_ELNRNG = 48;\n    SYS_EUNATCH = 49;\n    SYS_ENOCSI = 50;\n    SYS_EL2HLT = 51;\n    SYS_EBADE = 52;\n    SYS_EBADR = 53;\n    SYS_EXFULL = 54;\n    SYS_ENOANO = 55;\n    SYS_EBADRQC = 56;\n    SYS_EBADSLT = 57;\n    SYS_EBFONT = 59;\n    SYS_ENOSTR = 60;\n    SYS_ENODATA = 61;\n    SYS_ETIME = 62;\n    SYS_ENOSR = 63;\n    SYS_ENONET = 64;\n    SYS_ENOPKG = 65;\n    SYS_EREMOTE = 66;\n    SYS_ENOLINK = 67;\n    SYS_EADV = 68;\n    SYS_ESRMNT = 69;\n    SYS_ECOMM = 70;\n    SYS_EPROTO = 71;\n    SYS_EMULTIHOP = 72;\n    SYS_EDOTDOT = 73;\n    SYS_EBADMSG = 
74;\n    SYS_EOVERFLOW = 75;\n    SYS_ENOTUNIQ = 76;\n    SYS_EBADFD = 77;\n    SYS_EREMCHG = 78;\n    SYS_ELIBACC = 79;\n    SYS_ELIBBAD = 80;\n    SYS_ELIBSCN = 81;\n    SYS_ELIBMAX = 82;\n    SYS_ELIBEXEC = 83;\n    SYS_EILSEQ = 84;\n    SYS_ERESTART = 85;\n    SYS_ESTRPIPE = 86;\n    SYS_EUSERS = 87;\n    SYS_ENOTSOCK = 88;\n    SYS_EDESTADDRREQ = 89;\n    SYS_EMSGSIZE = 90;\n    SYS_EPROTOTYPE = 91;\n    SYS_ENOPROTOOPT = 92;\n    SYS_EPROTONOSUPPORT = 93;\n    SYS_ESOCKTNOSUPPORT = 94;\n    SYS_EOPNOTSUPP = 95;\n    SYS_ENOTSUP = 95;\n    SYS_EPFNOSUPPORT = 96;\n    SYS_EAFNOSUPPORT = 97;\n    SYS_EADDRINUSE = 98;\n    SYS_EADDRNOTAVAIL = 99;\n    SYS_ENETDOWN = 100;\n    SYS_ENETUNREACH = 101;\n    SYS_ENETRESET = 102;\n    SYS_ECONNABORTED = 103;\n    SYS_ECONNRESET = 104;\n    SYS_ENOBUFS = 105;\n    SYS_EISCONN = 106;\n    SYS_ENOTCONN = 107;\n    SYS_ESHUTDOWN = 108;\n    SYS_ETOOMANYREFS = 109;\n    SYS_ETIMEDOUT = 110;\n    SYS_ECONNREFUSED = 111;\n    SYS_EHOSTDOWN = 112;\n    SYS_EHOSTUNREACH = 113;\n    SYS_EALREADY = 114;\n    SYS_EINPROGRESS = 115;\n    SYS_ESTALE = 116;\n    SYS_EUCLEAN = 117;\n    SYS_ENOTNAM = 118;\n    SYS_ENAVAIL = 119;\n    SYS_EISNAM = 120;\n    SYS_EREMOTEIO = 121;\n    SYS_EDQUOT = 122;\n    SYS_ENOMEDIUM = 123;\n    SYS_EMEDIUMTYPE = 124;\n    SYS_ECANCELED = 125;\n    SYS_ENOKEY = 126;\n    SYS_EKEYEXPIRED = 127;\n    SYS_EKEYREVOKED = 128;\n    SYS_EKEYREJECTED = 129;\n    SYS_EOWNERDEAD = 130;\n    SYS_ENOTRECOVERABLE = 131;\n    SYS_ERFKILL = 132;\n  }\n\n  optional int32 system_error = 1 [default=0];\n  optional string error_detail = 2;\n}\n\nmessage AddressPort {\n  required int32 port = 1;\n  optional bytes packed_address = 2;\n\n  optional string hostname_hint = 3;\n}\n\n\n\nmessage CreateSocketRequest {\n  enum SocketFamily {\n    IPv4 = 1;\n    IPv6 = 2;\n  }\n\n  enum SocketProtocol {\n    TCP = 1;\n    UDP = 2;\n  }\n\n  required SocketFamily family = 1;\n  required SocketProtocol protocol = 2;\n\n  repeated 
SocketOption socket_options = 3;\n\n  optional AddressPort proxy_external_ip = 4;\n\n  optional int32 listen_backlog = 5 [default=0];\n\n  optional AddressPort remote_ip = 6;\n\n  optional string app_id = 9;\n\n  optional int64 project_id = 10;\n}\n\nmessage CreateSocketReply {\n  optional string socket_descriptor = 1;\n\n  optional AddressPort server_address = 3;\n\n  optional AddressPort proxy_external_ip = 4;\n\n  extensions 1000 to max;\n}\n\n\n\nmessage BindRequest {\n  required string socket_descriptor = 1;\n  required AddressPort proxy_external_ip = 2;\n}\n\nmessage BindReply {\n  optional AddressPort proxy_external_ip = 1;\n}\n\n\n\nmessage GetSocketNameRequest {\n  required string socket_descriptor = 1;\n}\n\nmessage GetSocketNameReply {\n  optional AddressPort proxy_external_ip = 2;\n}\n\n\n\nmessage GetPeerNameRequest {\n  required string socket_descriptor = 1;\n}\n\nmessage GetPeerNameReply {\n  optional AddressPort peer_ip = 2;\n}\n\n\nmessage SocketOption {\n\n  enum SocketOptionLevel {\n    SOCKET_SOL_IP = 0;\n    SOCKET_SOL_SOCKET = 1;\n    SOCKET_SOL_TCP = 6;\n    SOCKET_SOL_UDP = 17;\n  }\n\n  enum SocketOptionName {\n    option allow_alias = true;\n\n    SOCKET_SO_DEBUG = 1;\n    SOCKET_SO_REUSEADDR = 2;\n    SOCKET_SO_TYPE = 3;\n    SOCKET_SO_ERROR = 4;\n    SOCKET_SO_DONTROUTE = 5;\n    SOCKET_SO_BROADCAST = 6;\n    SOCKET_SO_SNDBUF = 7;\n    SOCKET_SO_RCVBUF = 8;\n    SOCKET_SO_KEEPALIVE = 9;\n    SOCKET_SO_OOBINLINE = 10;\n    SOCKET_SO_LINGER = 13;\n    SOCKET_SO_RCVTIMEO = 20;\n    SOCKET_SO_SNDTIMEO = 21;\n\n    SOCKET_IP_TOS = 1;\n    SOCKET_IP_TTL = 2;\n    SOCKET_IP_HDRINCL = 3;\n    SOCKET_IP_OPTIONS = 4;\n\n    SOCKET_TCP_NODELAY = 1;\n    SOCKET_TCP_MAXSEG = 2;\n    SOCKET_TCP_CORK = 3;\n    SOCKET_TCP_KEEPIDLE = 4;\n    SOCKET_TCP_KEEPINTVL = 5;\n    SOCKET_TCP_KEEPCNT = 6;\n    SOCKET_TCP_SYNCNT = 7;\n    SOCKET_TCP_LINGER2 = 8;\n    SOCKET_TCP_DEFER_ACCEPT = 9;\n    SOCKET_TCP_WINDOW_CLAMP = 10;\n    SOCKET_TCP_INFO = 11;\n    
SOCKET_TCP_QUICKACK = 12;\n  }\n\n  required SocketOptionLevel level = 1;\n  required SocketOptionName option = 2;\n  required bytes value = 3;\n}\n\n\nmessage SetSocketOptionsRequest {\n  required string socket_descriptor = 1;\n  repeated SocketOption options = 2;\n}\n\nmessage SetSocketOptionsReply {\n}\n\nmessage GetSocketOptionsRequest {\n  required string socket_descriptor = 1;\n  repeated SocketOption options = 2;\n}\n\nmessage GetSocketOptionsReply {\n  repeated SocketOption options = 2;\n}\n\n\nmessage ConnectRequest {\n  required string socket_descriptor = 1;\n  required AddressPort remote_ip = 2;\n  optional double timeout_seconds = 3 [default=-1];\n}\n\nmessage ConnectReply {\n  optional AddressPort proxy_external_ip = 1;\n\n  extensions 1000 to max;\n}\n\n\nmessage ListenRequest {\n  required string socket_descriptor = 1;\n  required int32 backlog = 2;\n}\n\nmessage ListenReply {\n}\n\n\nmessage AcceptRequest {\n  required string socket_descriptor = 1;\n  optional double timeout_seconds = 2 [default=-1];\n}\n\nmessage AcceptReply {\n  optional bytes new_socket_descriptor = 2;\n  optional AddressPort remote_address = 3;\n}\n\n\n\nmessage ShutDownRequest {\n  enum How {\n    SOCKET_SHUT_RD = 1;\n    SOCKET_SHUT_WR = 2;\n    SOCKET_SHUT_RDWR = 3;\n  }\n  required string socket_descriptor = 1;\n  required How how = 2;\n  required int64 send_offset = 3;\n}\n\nmessage ShutDownReply {\n}\n\n\n\nmessage CloseRequest {\n  required string socket_descriptor = 1;\n  optional int64 send_offset = 2 [default=-1];\n}\n\nmessage CloseReply {\n}\n\n\n\nmessage SendRequest {\n  required string socket_descriptor = 1;\n  required bytes data = 2 [ctype=CORD];\n  required int64 stream_offset = 3;\n  optional int32 flags = 4 [default=0];\n  optional AddressPort send_to = 5;\n  optional double timeout_seconds = 6 [default=-1];\n}\n\nmessage SendReply {\n  optional int32 data_sent = 1;\n}\n\n\nmessage ReceiveRequest {\n  enum Flags {\n    MSG_OOB = 1;\n    MSG_PEEK = 2;\n  }\n  
required string socket_descriptor = 1;\n  required int32 data_size = 2;\n  optional int32 flags = 3 [default=0];\n  optional double timeout_seconds = 5 [default=-1];\n}\n\nmessage ReceiveReply {\n  optional int64 stream_offset = 2;\n  optional bytes data = 3 [ctype=CORD];\n  optional AddressPort received_from = 4;\n  optional int32 buffer_size = 5;\n}\n\n\n\nmessage PollEvent {\n\n  enum PollEventFlag {\n    SOCKET_POLLNONE = 0;\n    SOCKET_POLLIN = 1;\n    SOCKET_POLLPRI = 2;\n    SOCKET_POLLOUT = 4;\n    SOCKET_POLLERR = 8;\n    SOCKET_POLLHUP = 16;\n    SOCKET_POLLNVAL = 32;\n    SOCKET_POLLRDNORM = 64;\n    SOCKET_POLLRDBAND = 128;\n    SOCKET_POLLWRNORM = 256;\n    SOCKET_POLLWRBAND = 512;\n    SOCKET_POLLMSG = 1024;\n    SOCKET_POLLREMOVE = 4096;\n    SOCKET_POLLRDHUP = 8192;\n  };\n\n  required string socket_descriptor = 1;\n  required int32 requested_events = 2;\n  required int32 observed_events = 3;\n}\n\nmessage PollRequest {\n  repeated PollEvent events = 1;\n  optional double timeout_seconds = 2 [default=-1];\n}\n\nmessage PollReply {\n  repeated PollEvent events = 2;\n}\n\nmessage ResolveRequest {\n  required string name = 1;\n  repeated CreateSocketRequest.SocketFamily address_families = 2;\n}\n\nmessage ResolveReply {\n  enum ErrorCode {\n    SOCKET_EAI_ADDRFAMILY = 1;\n    SOCKET_EAI_AGAIN = 2;\n    SOCKET_EAI_BADFLAGS = 3;\n    SOCKET_EAI_FAIL = 4;\n    SOCKET_EAI_FAMILY = 5;\n    SOCKET_EAI_MEMORY = 6;\n    SOCKET_EAI_NODATA = 7;\n    SOCKET_EAI_NONAME = 8;\n    SOCKET_EAI_SERVICE = 9;\n    SOCKET_EAI_SOCKTYPE = 10;\n    SOCKET_EAI_SYSTEM = 11;\n    SOCKET_EAI_BADHINTS = 12;\n    SOCKET_EAI_PROTOCOL = 13;\n    SOCKET_EAI_OVERFLOW = 14;\n    SOCKET_EAI_MAX = 15;\n  };\n\n  repeated bytes packed_address = 2;\n  optional string canonical_name = 3;\n  repeated string aliases = 4;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/system/system_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/system/system_service.proto\n// DO NOT EDIT!\n\n/*\nPackage system is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/system/system_service.proto\n\nIt has these top-level messages:\n\tSystemServiceError\n\tSystemStat\n\tGetSystemStatsRequest\n\tGetSystemStatsResponse\n\tStartBackgroundRequestRequest\n\tStartBackgroundRequestResponse\n*/\npackage system\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype SystemServiceError_ErrorCode int32\n\nconst (\n\tSystemServiceError_OK               SystemServiceError_ErrorCode = 0\n\tSystemServiceError_INTERNAL_ERROR   SystemServiceError_ErrorCode = 1\n\tSystemServiceError_BACKEND_REQUIRED SystemServiceError_ErrorCode = 2\n\tSystemServiceError_LIMIT_REACHED    SystemServiceError_ErrorCode = 3\n)\n\nvar SystemServiceError_ErrorCode_name = map[int32]string{\n\t0: \"OK\",\n\t1: \"INTERNAL_ERROR\",\n\t2: \"BACKEND_REQUIRED\",\n\t3: \"LIMIT_REACHED\",\n}\nvar SystemServiceError_ErrorCode_value = map[string]int32{\n\t\"OK\":               0,\n\t\"INTERNAL_ERROR\":   1,\n\t\"BACKEND_REQUIRED\": 2,\n\t\"LIMIT_REACHED\":    3,\n}\n\nfunc (x SystemServiceError_ErrorCode) Enum() *SystemServiceError_ErrorCode {\n\tp := new(SystemServiceError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x SystemServiceError_ErrorCode) String() string {\n\treturn proto.EnumName(SystemServiceError_ErrorCode_name, int32(x))\n}\nfunc (x *SystemServiceError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(SystemServiceError_ErrorCode_value, data, \"SystemServiceError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = SystemServiceError_ErrorCode(value)\n\treturn 
nil\n}\n\ntype SystemServiceError struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *SystemServiceError) Reset()         { *m = SystemServiceError{} }\nfunc (m *SystemServiceError) String() string { return proto.CompactTextString(m) }\nfunc (*SystemServiceError) ProtoMessage()    {}\n\ntype SystemStat struct {\n\t// Instaneous value of this stat.\n\tCurrent *float64 `protobuf:\"fixed64,1,opt,name=current\" json:\"current,omitempty\"`\n\t// Average over time, if this stat has an instaneous value.\n\tAverage1M  *float64 `protobuf:\"fixed64,3,opt,name=average1m\" json:\"average1m,omitempty\"`\n\tAverage10M *float64 `protobuf:\"fixed64,4,opt,name=average10m\" json:\"average10m,omitempty\"`\n\t// Total value, if the stat accumulates over time.\n\tTotal *float64 `protobuf:\"fixed64,2,opt,name=total\" json:\"total,omitempty\"`\n\t// Rate over time, if this stat accumulates.\n\tRate1M           *float64 `protobuf:\"fixed64,5,opt,name=rate1m\" json:\"rate1m,omitempty\"`\n\tRate10M          *float64 `protobuf:\"fixed64,6,opt,name=rate10m\" json:\"rate10m,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *SystemStat) Reset()         { *m = SystemStat{} }\nfunc (m *SystemStat) String() string { return proto.CompactTextString(m) }\nfunc (*SystemStat) ProtoMessage()    {}\n\nfunc (m *SystemStat) GetCurrent() float64 {\n\tif m != nil && m.Current != nil {\n\t\treturn *m.Current\n\t}\n\treturn 0\n}\n\nfunc (m *SystemStat) GetAverage1M() float64 {\n\tif m != nil && m.Average1M != nil {\n\t\treturn *m.Average1M\n\t}\n\treturn 0\n}\n\nfunc (m *SystemStat) GetAverage10M() float64 {\n\tif m != nil && m.Average10M != nil {\n\t\treturn *m.Average10M\n\t}\n\treturn 0\n}\n\nfunc (m *SystemStat) GetTotal() float64 {\n\tif m != nil && m.Total != nil {\n\t\treturn *m.Total\n\t}\n\treturn 0\n}\n\nfunc (m *SystemStat) GetRate1M() float64 {\n\tif m != nil && m.Rate1M != nil {\n\t\treturn *m.Rate1M\n\t}\n\treturn 0\n}\n\nfunc (m *SystemStat) GetRate10M() float64 
{\n\tif m != nil && m.Rate10M != nil {\n\t\treturn *m.Rate10M\n\t}\n\treturn 0\n}\n\ntype GetSystemStatsRequest struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *GetSystemStatsRequest) Reset()         { *m = GetSystemStatsRequest{} }\nfunc (m *GetSystemStatsRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetSystemStatsRequest) ProtoMessage()    {}\n\ntype GetSystemStatsResponse struct {\n\t// CPU used by this instance, in mcycles.\n\tCpu *SystemStat `protobuf:\"bytes,1,opt,name=cpu\" json:\"cpu,omitempty\"`\n\t// Physical memory (RAM) used by this instance, in megabytes.\n\tMemory           *SystemStat `protobuf:\"bytes,2,opt,name=memory\" json:\"memory,omitempty\"`\n\tXXX_unrecognized []byte      `json:\"-\"`\n}\n\nfunc (m *GetSystemStatsResponse) Reset()         { *m = GetSystemStatsResponse{} }\nfunc (m *GetSystemStatsResponse) String() string { return proto.CompactTextString(m) }\nfunc (*GetSystemStatsResponse) ProtoMessage()    {}\n\nfunc (m *GetSystemStatsResponse) GetCpu() *SystemStat {\n\tif m != nil {\n\t\treturn m.Cpu\n\t}\n\treturn nil\n}\n\nfunc (m *GetSystemStatsResponse) GetMemory() *SystemStat {\n\tif m != nil {\n\t\treturn m.Memory\n\t}\n\treturn nil\n}\n\ntype StartBackgroundRequestRequest struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *StartBackgroundRequestRequest) Reset()         { *m = StartBackgroundRequestRequest{} }\nfunc (m *StartBackgroundRequestRequest) String() string { return proto.CompactTextString(m) }\nfunc (*StartBackgroundRequestRequest) ProtoMessage()    {}\n\ntype StartBackgroundRequestResponse struct {\n\t// Every /_ah/background request will have an X-AppEngine-BackgroundRequest\n\t// header, whose value will be equal to this parameter, the request_id.\n\tRequestId        *string `protobuf:\"bytes,1,opt,name=request_id\" json:\"request_id,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *StartBackgroundRequestResponse) Reset()         { *m = 
StartBackgroundRequestResponse{} }\nfunc (m *StartBackgroundRequestResponse) String() string { return proto.CompactTextString(m) }\nfunc (*StartBackgroundRequestResponse) ProtoMessage()    {}\n\nfunc (m *StartBackgroundRequestResponse) GetRequestId() string {\n\tif m != nil && m.RequestId != nil {\n\t\treturn *m.RequestId\n\t}\n\treturn \"\"\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/system/system_service.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"system\";\n\npackage appengine;\n\nmessage SystemServiceError {\n  enum ErrorCode {\n    OK = 0;\n    INTERNAL_ERROR = 1;\n    BACKEND_REQUIRED = 2;\n    LIMIT_REACHED = 3;\n  }\n}\n\nmessage SystemStat {\n  // Instaneous value of this stat.\n  optional double current = 1;\n\n  // Average over time, if this stat has an instaneous value.\n  optional double average1m = 3;\n  optional double average10m = 4;\n\n  // Total value, if the stat accumulates over time.\n  optional double total = 2;\n\n  // Rate over time, if this stat accumulates.\n  optional double rate1m = 5;\n  optional double rate10m = 6;\n}\n\nmessage GetSystemStatsRequest {\n}\n\nmessage GetSystemStatsResponse {\n  // CPU used by this instance, in mcycles.\n  optional SystemStat cpu = 1;\n\n  // Physical memory (RAM) used by this instance, in megabytes.\n  optional SystemStat memory = 2;\n}\n\nmessage StartBackgroundRequestRequest {\n}\n\nmessage StartBackgroundRequestResponse {\n  // Every /_ah/background request will have an X-AppEngine-BackgroundRequest\n  // header, whose value will be equal to this parameter, the request_id.\n  optional string request_id = 1;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto\n// DO NOT EDIT!\n\n/*\nPackage taskqueue is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/taskqueue/taskqueue_service.proto\n\nIt has these top-level messages:\n\tTaskQueueServiceError\n\tTaskPayload\n\tTaskQueueRetryParameters\n\tTaskQueueAcl\n\tTaskQueueHttpHeader\n\tTaskQueueMode\n\tTaskQueueAddRequest\n\tTaskQueueAddResponse\n\tTaskQueueBulkAddRequest\n\tTaskQueueBulkAddResponse\n\tTaskQueueDeleteRequest\n\tTaskQueueDeleteResponse\n\tTaskQueueForceRunRequest\n\tTaskQueueForceRunResponse\n\tTaskQueueUpdateQueueRequest\n\tTaskQueueUpdateQueueResponse\n\tTaskQueueFetchQueuesRequest\n\tTaskQueueFetchQueuesResponse\n\tTaskQueueFetchQueueStatsRequest\n\tTaskQueueScannerQueueInfo\n\tTaskQueueFetchQueueStatsResponse\n\tTaskQueuePauseQueueRequest\n\tTaskQueuePauseQueueResponse\n\tTaskQueuePurgeQueueRequest\n\tTaskQueuePurgeQueueResponse\n\tTaskQueueDeleteQueueRequest\n\tTaskQueueDeleteQueueResponse\n\tTaskQueueDeleteGroupRequest\n\tTaskQueueDeleteGroupResponse\n\tTaskQueueQueryTasksRequest\n\tTaskQueueQueryTasksResponse\n\tTaskQueueFetchTaskRequest\n\tTaskQueueFetchTaskResponse\n\tTaskQueueUpdateStorageLimitRequest\n\tTaskQueueUpdateStorageLimitResponse\n\tTaskQueueQueryAndOwnTasksRequest\n\tTaskQueueQueryAndOwnTasksResponse\n\tTaskQueueModifyTaskLeaseRequest\n\tTaskQueueModifyTaskLeaseResponse\n*/\npackage taskqueue\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\nimport appengine \"google.golang.org/appengine/internal/datastore\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype TaskQueueServiceError_ErrorCode int32\n\nconst (\n\tTaskQueueServiceError_OK                              TaskQueueServiceError_ErrorCode = 
0\n\tTaskQueueServiceError_UNKNOWN_QUEUE                   TaskQueueServiceError_ErrorCode = 1\n\tTaskQueueServiceError_TRANSIENT_ERROR                 TaskQueueServiceError_ErrorCode = 2\n\tTaskQueueServiceError_INTERNAL_ERROR                  TaskQueueServiceError_ErrorCode = 3\n\tTaskQueueServiceError_TASK_TOO_LARGE                  TaskQueueServiceError_ErrorCode = 4\n\tTaskQueueServiceError_INVALID_TASK_NAME               TaskQueueServiceError_ErrorCode = 5\n\tTaskQueueServiceError_INVALID_QUEUE_NAME              TaskQueueServiceError_ErrorCode = 6\n\tTaskQueueServiceError_INVALID_URL                     TaskQueueServiceError_ErrorCode = 7\n\tTaskQueueServiceError_INVALID_QUEUE_RATE              TaskQueueServiceError_ErrorCode = 8\n\tTaskQueueServiceError_PERMISSION_DENIED               TaskQueueServiceError_ErrorCode = 9\n\tTaskQueueServiceError_TASK_ALREADY_EXISTS             TaskQueueServiceError_ErrorCode = 10\n\tTaskQueueServiceError_TOMBSTONED_TASK                 TaskQueueServiceError_ErrorCode = 11\n\tTaskQueueServiceError_INVALID_ETA                     TaskQueueServiceError_ErrorCode = 12\n\tTaskQueueServiceError_INVALID_REQUEST                 TaskQueueServiceError_ErrorCode = 13\n\tTaskQueueServiceError_UNKNOWN_TASK                    TaskQueueServiceError_ErrorCode = 14\n\tTaskQueueServiceError_TOMBSTONED_QUEUE                TaskQueueServiceError_ErrorCode = 15\n\tTaskQueueServiceError_DUPLICATE_TASK_NAME             TaskQueueServiceError_ErrorCode = 16\n\tTaskQueueServiceError_SKIPPED                         TaskQueueServiceError_ErrorCode = 17\n\tTaskQueueServiceError_TOO_MANY_TASKS                  TaskQueueServiceError_ErrorCode = 18\n\tTaskQueueServiceError_INVALID_PAYLOAD                 TaskQueueServiceError_ErrorCode = 19\n\tTaskQueueServiceError_INVALID_RETRY_PARAMETERS        TaskQueueServiceError_ErrorCode = 20\n\tTaskQueueServiceError_INVALID_QUEUE_MODE              TaskQueueServiceError_ErrorCode = 
21\n\tTaskQueueServiceError_ACL_LOOKUP_ERROR                TaskQueueServiceError_ErrorCode = 22\n\tTaskQueueServiceError_TRANSACTIONAL_REQUEST_TOO_LARGE TaskQueueServiceError_ErrorCode = 23\n\tTaskQueueServiceError_INCORRECT_CREATOR_NAME          TaskQueueServiceError_ErrorCode = 24\n\tTaskQueueServiceError_TASK_LEASE_EXPIRED              TaskQueueServiceError_ErrorCode = 25\n\tTaskQueueServiceError_QUEUE_PAUSED                    TaskQueueServiceError_ErrorCode = 26\n\tTaskQueueServiceError_INVALID_TAG                     TaskQueueServiceError_ErrorCode = 27\n\t// Reserved range for the Datastore error codes.\n\t// Original Datastore error code is shifted by DATASTORE_ERROR offset.\n\tTaskQueueServiceError_DATASTORE_ERROR TaskQueueServiceError_ErrorCode = 10000\n)\n\nvar TaskQueueServiceError_ErrorCode_name = map[int32]string{\n\t0:     \"OK\",\n\t1:     \"UNKNOWN_QUEUE\",\n\t2:     \"TRANSIENT_ERROR\",\n\t3:     \"INTERNAL_ERROR\",\n\t4:     \"TASK_TOO_LARGE\",\n\t5:     \"INVALID_TASK_NAME\",\n\t6:     \"INVALID_QUEUE_NAME\",\n\t7:     \"INVALID_URL\",\n\t8:     \"INVALID_QUEUE_RATE\",\n\t9:     \"PERMISSION_DENIED\",\n\t10:    \"TASK_ALREADY_EXISTS\",\n\t11:    \"TOMBSTONED_TASK\",\n\t12:    \"INVALID_ETA\",\n\t13:    \"INVALID_REQUEST\",\n\t14:    \"UNKNOWN_TASK\",\n\t15:    \"TOMBSTONED_QUEUE\",\n\t16:    \"DUPLICATE_TASK_NAME\",\n\t17:    \"SKIPPED\",\n\t18:    \"TOO_MANY_TASKS\",\n\t19:    \"INVALID_PAYLOAD\",\n\t20:    \"INVALID_RETRY_PARAMETERS\",\n\t21:    \"INVALID_QUEUE_MODE\",\n\t22:    \"ACL_LOOKUP_ERROR\",\n\t23:    \"TRANSACTIONAL_REQUEST_TOO_LARGE\",\n\t24:    \"INCORRECT_CREATOR_NAME\",\n\t25:    \"TASK_LEASE_EXPIRED\",\n\t26:    \"QUEUE_PAUSED\",\n\t27:    \"INVALID_TAG\",\n\t10000: \"DATASTORE_ERROR\",\n}\nvar TaskQueueServiceError_ErrorCode_value = map[string]int32{\n\t\"OK\":                              0,\n\t\"UNKNOWN_QUEUE\":                   1,\n\t\"TRANSIENT_ERROR\":                 2,\n\t\"INTERNAL_ERROR\":                  
3,\n\t\"TASK_TOO_LARGE\":                  4,\n\t\"INVALID_TASK_NAME\":               5,\n\t\"INVALID_QUEUE_NAME\":              6,\n\t\"INVALID_URL\":                     7,\n\t\"INVALID_QUEUE_RATE\":              8,\n\t\"PERMISSION_DENIED\":               9,\n\t\"TASK_ALREADY_EXISTS\":             10,\n\t\"TOMBSTONED_TASK\":                 11,\n\t\"INVALID_ETA\":                     12,\n\t\"INVALID_REQUEST\":                 13,\n\t\"UNKNOWN_TASK\":                    14,\n\t\"TOMBSTONED_QUEUE\":                15,\n\t\"DUPLICATE_TASK_NAME\":             16,\n\t\"SKIPPED\":                         17,\n\t\"TOO_MANY_TASKS\":                  18,\n\t\"INVALID_PAYLOAD\":                 19,\n\t\"INVALID_RETRY_PARAMETERS\":        20,\n\t\"INVALID_QUEUE_MODE\":              21,\n\t\"ACL_LOOKUP_ERROR\":                22,\n\t\"TRANSACTIONAL_REQUEST_TOO_LARGE\": 23,\n\t\"INCORRECT_CREATOR_NAME\":          24,\n\t\"TASK_LEASE_EXPIRED\":              25,\n\t\"QUEUE_PAUSED\":                    26,\n\t\"INVALID_TAG\":                     27,\n\t\"DATASTORE_ERROR\":                 10000,\n}\n\nfunc (x TaskQueueServiceError_ErrorCode) Enum() *TaskQueueServiceError_ErrorCode {\n\tp := new(TaskQueueServiceError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x TaskQueueServiceError_ErrorCode) String() string {\n\treturn proto.EnumName(TaskQueueServiceError_ErrorCode_name, int32(x))\n}\nfunc (x *TaskQueueServiceError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(TaskQueueServiceError_ErrorCode_value, data, \"TaskQueueServiceError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = TaskQueueServiceError_ErrorCode(value)\n\treturn nil\n}\n\ntype TaskQueueMode_Mode int32\n\nconst (\n\tTaskQueueMode_PUSH TaskQueueMode_Mode = 0\n\tTaskQueueMode_PULL TaskQueueMode_Mode = 1\n)\n\nvar TaskQueueMode_Mode_name = map[int32]string{\n\t0: \"PUSH\",\n\t1: \"PULL\",\n}\nvar TaskQueueMode_Mode_value = map[string]int32{\n\t\"PUSH\": 
0,\n\t\"PULL\": 1,\n}\n\nfunc (x TaskQueueMode_Mode) Enum() *TaskQueueMode_Mode {\n\tp := new(TaskQueueMode_Mode)\n\t*p = x\n\treturn p\n}\nfunc (x TaskQueueMode_Mode) String() string {\n\treturn proto.EnumName(TaskQueueMode_Mode_name, int32(x))\n}\nfunc (x *TaskQueueMode_Mode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(TaskQueueMode_Mode_value, data, \"TaskQueueMode_Mode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = TaskQueueMode_Mode(value)\n\treturn nil\n}\n\ntype TaskQueueAddRequest_RequestMethod int32\n\nconst (\n\tTaskQueueAddRequest_GET    TaskQueueAddRequest_RequestMethod = 1\n\tTaskQueueAddRequest_POST   TaskQueueAddRequest_RequestMethod = 2\n\tTaskQueueAddRequest_HEAD   TaskQueueAddRequest_RequestMethod = 3\n\tTaskQueueAddRequest_PUT    TaskQueueAddRequest_RequestMethod = 4\n\tTaskQueueAddRequest_DELETE TaskQueueAddRequest_RequestMethod = 5\n)\n\nvar TaskQueueAddRequest_RequestMethod_name = map[int32]string{\n\t1: \"GET\",\n\t2: \"POST\",\n\t3: \"HEAD\",\n\t4: \"PUT\",\n\t5: \"DELETE\",\n}\nvar TaskQueueAddRequest_RequestMethod_value = map[string]int32{\n\t\"GET\":    1,\n\t\"POST\":   2,\n\t\"HEAD\":   3,\n\t\"PUT\":    4,\n\t\"DELETE\": 5,\n}\n\nfunc (x TaskQueueAddRequest_RequestMethod) Enum() *TaskQueueAddRequest_RequestMethod {\n\tp := new(TaskQueueAddRequest_RequestMethod)\n\t*p = x\n\treturn p\n}\nfunc (x TaskQueueAddRequest_RequestMethod) String() string {\n\treturn proto.EnumName(TaskQueueAddRequest_RequestMethod_name, int32(x))\n}\nfunc (x *TaskQueueAddRequest_RequestMethod) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(TaskQueueAddRequest_RequestMethod_value, data, \"TaskQueueAddRequest_RequestMethod\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = TaskQueueAddRequest_RequestMethod(value)\n\treturn nil\n}\n\ntype TaskQueueQueryTasksResponse_Task_RequestMethod int32\n\nconst (\n\tTaskQueueQueryTasksResponse_Task_GET    TaskQueueQueryTasksResponse_Task_RequestMethod = 
1\n\tTaskQueueQueryTasksResponse_Task_POST   TaskQueueQueryTasksResponse_Task_RequestMethod = 2\n\tTaskQueueQueryTasksResponse_Task_HEAD   TaskQueueQueryTasksResponse_Task_RequestMethod = 3\n\tTaskQueueQueryTasksResponse_Task_PUT    TaskQueueQueryTasksResponse_Task_RequestMethod = 4\n\tTaskQueueQueryTasksResponse_Task_DELETE TaskQueueQueryTasksResponse_Task_RequestMethod = 5\n)\n\nvar TaskQueueQueryTasksResponse_Task_RequestMethod_name = map[int32]string{\n\t1: \"GET\",\n\t2: \"POST\",\n\t3: \"HEAD\",\n\t4: \"PUT\",\n\t5: \"DELETE\",\n}\nvar TaskQueueQueryTasksResponse_Task_RequestMethod_value = map[string]int32{\n\t\"GET\":    1,\n\t\"POST\":   2,\n\t\"HEAD\":   3,\n\t\"PUT\":    4,\n\t\"DELETE\": 5,\n}\n\nfunc (x TaskQueueQueryTasksResponse_Task_RequestMethod) Enum() *TaskQueueQueryTasksResponse_Task_RequestMethod {\n\tp := new(TaskQueueQueryTasksResponse_Task_RequestMethod)\n\t*p = x\n\treturn p\n}\nfunc (x TaskQueueQueryTasksResponse_Task_RequestMethod) String() string {\n\treturn proto.EnumName(TaskQueueQueryTasksResponse_Task_RequestMethod_name, int32(x))\n}\nfunc (x *TaskQueueQueryTasksResponse_Task_RequestMethod) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(TaskQueueQueryTasksResponse_Task_RequestMethod_value, data, \"TaskQueueQueryTasksResponse_Task_RequestMethod\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = TaskQueueQueryTasksResponse_Task_RequestMethod(value)\n\treturn nil\n}\n\ntype TaskQueueServiceError struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *TaskQueueServiceError) Reset()         { *m = TaskQueueServiceError{} }\nfunc (m *TaskQueueServiceError) String() string { return proto.CompactTextString(m) }\nfunc (*TaskQueueServiceError) ProtoMessage()    {}\n\ntype TaskPayload struct {\n\tXXX_extensions   map[int32]proto.Extension `json:\"-\"`\n\tXXX_unrecognized []byte                    `json:\"-\"`\n}\n\nfunc (m *TaskPayload) Reset()         { *m = TaskPayload{} }\nfunc (m *TaskPayload) String() 
string { return proto.CompactTextString(m) }\nfunc (*TaskPayload) ProtoMessage()    {}\n\nfunc (m *TaskPayload) Marshal() ([]byte, error) {\n\treturn proto.MarshalMessageSet(m.ExtensionMap())\n}\nfunc (m *TaskPayload) Unmarshal(buf []byte) error {\n\treturn proto.UnmarshalMessageSet(buf, m.ExtensionMap())\n}\nfunc (m *TaskPayload) MarshalJSON() ([]byte, error) {\n\treturn proto.MarshalMessageSetJSON(m.XXX_extensions)\n}\nfunc (m *TaskPayload) UnmarshalJSON(buf []byte) error {\n\treturn proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions)\n}\n\n// ensure TaskPayload satisfies proto.Marshaler and proto.Unmarshaler\nvar _ proto.Marshaler = (*TaskPayload)(nil)\nvar _ proto.Unmarshaler = (*TaskPayload)(nil)\n\nvar extRange_TaskPayload = []proto.ExtensionRange{\n\t{10, 2147483646},\n}\n\nfunc (*TaskPayload) ExtensionRangeArray() []proto.ExtensionRange {\n\treturn extRange_TaskPayload\n}\nfunc (m *TaskPayload) ExtensionMap() map[int32]proto.Extension {\n\tif m.XXX_extensions == nil {\n\t\tm.XXX_extensions = make(map[int32]proto.Extension)\n\t}\n\treturn m.XXX_extensions\n}\n\ntype TaskQueueRetryParameters struct {\n\tRetryLimit       *int32   `protobuf:\"varint,1,opt,name=retry_limit\" json:\"retry_limit,omitempty\"`\n\tAgeLimitSec      *int64   `protobuf:\"varint,2,opt,name=age_limit_sec\" json:\"age_limit_sec,omitempty\"`\n\tMinBackoffSec    *float64 `protobuf:\"fixed64,3,opt,name=min_backoff_sec,def=0.1\" json:\"min_backoff_sec,omitempty\"`\n\tMaxBackoffSec    *float64 `protobuf:\"fixed64,4,opt,name=max_backoff_sec,def=3600\" json:\"max_backoff_sec,omitempty\"`\n\tMaxDoublings     *int32   `protobuf:\"varint,5,opt,name=max_doublings,def=16\" json:\"max_doublings,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *TaskQueueRetryParameters) Reset()         { *m = TaskQueueRetryParameters{} }\nfunc (m *TaskQueueRetryParameters) String() string { return proto.CompactTextString(m) }\nfunc (*TaskQueueRetryParameters) ProtoMessage()    {}\n\nconst 
Default_TaskQueueRetryParameters_MinBackoffSec float64 = 0.1\nconst Default_TaskQueueRetryParameters_MaxBackoffSec float64 = 3600\nconst Default_TaskQueueRetryParameters_MaxDoublings int32 = 16\n\nfunc (m *TaskQueueRetryParameters) GetRetryLimit() int32 {\n\tif m != nil && m.RetryLimit != nil {\n\t\treturn *m.RetryLimit\n\t}\n\treturn 0\n}\n\nfunc (m *TaskQueueRetryParameters) GetAgeLimitSec() int64 {\n\tif m != nil && m.AgeLimitSec != nil {\n\t\treturn *m.AgeLimitSec\n\t}\n\treturn 0\n}\n\nfunc (m *TaskQueueRetryParameters) GetMinBackoffSec() float64 {\n\tif m != nil && m.MinBackoffSec != nil {\n\t\treturn *m.MinBackoffSec\n\t}\n\treturn Default_TaskQueueRetryParameters_MinBackoffSec\n}\n\nfunc (m *TaskQueueRetryParameters) GetMaxBackoffSec() float64 {\n\tif m != nil && m.MaxBackoffSec != nil {\n\t\treturn *m.MaxBackoffSec\n\t}\n\treturn Default_TaskQueueRetryParameters_MaxBackoffSec\n}\n\nfunc (m *TaskQueueRetryParameters) GetMaxDoublings() int32 {\n\tif m != nil && m.MaxDoublings != nil {\n\t\treturn *m.MaxDoublings\n\t}\n\treturn Default_TaskQueueRetryParameters_MaxDoublings\n}\n\ntype TaskQueueAcl struct {\n\tUserEmail        [][]byte `protobuf:\"bytes,1,rep,name=user_email\" json:\"user_email,omitempty\"`\n\tWriterEmail      [][]byte `protobuf:\"bytes,2,rep,name=writer_email\" json:\"writer_email,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *TaskQueueAcl) Reset()         { *m = TaskQueueAcl{} }\nfunc (m *TaskQueueAcl) String() string { return proto.CompactTextString(m) }\nfunc (*TaskQueueAcl) ProtoMessage()    {}\n\nfunc (m *TaskQueueAcl) GetUserEmail() [][]byte {\n\tif m != nil {\n\t\treturn m.UserEmail\n\t}\n\treturn nil\n}\n\nfunc (m *TaskQueueAcl) GetWriterEmail() [][]byte {\n\tif m != nil {\n\t\treturn m.WriterEmail\n\t}\n\treturn nil\n}\n\ntype TaskQueueHttpHeader struct {\n\tKey              []byte `protobuf:\"bytes,1,req,name=key\" json:\"key,omitempty\"`\n\tValue            []byte `protobuf:\"bytes,2,req,name=value\" 
json:\"value,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *TaskQueueHttpHeader) Reset()         { *m = TaskQueueHttpHeader{} }\nfunc (m *TaskQueueHttpHeader) String() string { return proto.CompactTextString(m) }\nfunc (*TaskQueueHttpHeader) ProtoMessage()    {}\n\nfunc (m *TaskQueueHttpHeader) GetKey() []byte {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *TaskQueueHttpHeader) GetValue() []byte {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\ntype TaskQueueMode struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *TaskQueueMode) Reset()         { *m = TaskQueueMode{} }\nfunc (m *TaskQueueMode) String() string { return proto.CompactTextString(m) }\nfunc (*TaskQueueMode) ProtoMessage()    {}\n\ntype TaskQueueAddRequest struct {\n\tQueueName        []byte                             `protobuf:\"bytes,1,req,name=queue_name\" json:\"queue_name,omitempty\"`\n\tTaskName         []byte                             `protobuf:\"bytes,2,req,name=task_name\" json:\"task_name,omitempty\"`\n\tEtaUsec          *int64                             `protobuf:\"varint,3,req,name=eta_usec\" json:\"eta_usec,omitempty\"`\n\tMethod           *TaskQueueAddRequest_RequestMethod `protobuf:\"varint,5,opt,name=method,enum=appengine.TaskQueueAddRequest_RequestMethod,def=2\" json:\"method,omitempty\"`\n\tUrl              []byte                             `protobuf:\"bytes,4,opt,name=url\" json:\"url,omitempty\"`\n\tHeader           []*TaskQueueAddRequest_Header      `protobuf:\"group,6,rep,name=Header\" json:\"header,omitempty\"`\n\tBody             []byte                             `protobuf:\"bytes,9,opt,name=body\" json:\"body,omitempty\"`\n\tTransaction      *appengine.Transaction             `protobuf:\"bytes,10,opt,name=transaction\" json:\"transaction,omitempty\"`\n\tAppId            []byte                             `protobuf:\"bytes,11,opt,name=app_id\" json:\"app_id,omitempty\"`\n\tCrontimetable    
*TaskQueueAddRequest_CronTimetable `protobuf:\"group,12,opt,name=CronTimetable\" json:\"crontimetable,omitempty\"`\n\tDescription      []byte                             `protobuf:\"bytes,15,opt,name=description\" json:\"description,omitempty\"`\n\tPayload          *TaskPayload                       `protobuf:\"bytes,16,opt,name=payload\" json:\"payload,omitempty\"`\n\tRetryParameters  *TaskQueueRetryParameters          `protobuf:\"bytes,17,opt,name=retry_parameters\" json:\"retry_parameters,omitempty\"`\n\tMode             *TaskQueueMode_Mode                `protobuf:\"varint,18,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0\" json:\"mode,omitempty\"`\n\tTag              []byte                             `protobuf:\"bytes,19,opt,name=tag\" json:\"tag,omitempty\"`\n\tXXX_unrecognized []byte                             `json:\"-\"`\n}\n\nfunc (m *TaskQueueAddRequest) Reset()         { *m = TaskQueueAddRequest{} }\nfunc (m *TaskQueueAddRequest) String() string { return proto.CompactTextString(m) }\nfunc (*TaskQueueAddRequest) ProtoMessage()    {}\n\nconst Default_TaskQueueAddRequest_Method TaskQueueAddRequest_RequestMethod = TaskQueueAddRequest_POST\nconst Default_TaskQueueAddRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH\n\nfunc (m *TaskQueueAddRequest) GetQueueName() []byte {\n\tif m != nil {\n\t\treturn m.QueueName\n\t}\n\treturn nil\n}\n\nfunc (m *TaskQueueAddRequest) GetTaskName() []byte {\n\tif m != nil {\n\t\treturn m.TaskName\n\t}\n\treturn nil\n}\n\nfunc (m *TaskQueueAddRequest) GetEtaUsec() int64 {\n\tif m != nil && m.EtaUsec != nil {\n\t\treturn *m.EtaUsec\n\t}\n\treturn 0\n}\n\nfunc (m *TaskQueueAddRequest) GetMethod() TaskQueueAddRequest_RequestMethod {\n\tif m != nil && m.Method != nil {\n\t\treturn *m.Method\n\t}\n\treturn Default_TaskQueueAddRequest_Method\n}\n\nfunc (m *TaskQueueAddRequest) GetUrl() []byte {\n\tif m != nil {\n\t\treturn m.Url\n\t}\n\treturn nil\n}\n\nfunc (m *TaskQueueAddRequest) GetHeader() 
[]*TaskQueueAddRequest_Header {\n\tif m != nil {\n\t\treturn m.Header\n\t}\n\treturn nil\n}\n\nfunc (m *TaskQueueAddRequest) GetBody() []byte {\n\tif m != nil {\n\t\treturn m.Body\n\t}\n\treturn nil\n}\n\nfunc (m *TaskQueueAddRequest) GetTransaction() *appengine.Transaction {\n\tif m != nil {\n\t\treturn m.Transaction\n\t}\n\treturn nil\n}\n\nfunc (m *TaskQueueAddRequest) GetAppId() []byte {\n\tif m != nil {\n\t\treturn m.AppId\n\t}\n\treturn nil\n}\n\nfunc (m *TaskQueueAddRequest) GetCrontimetable() *TaskQueueAddRequest_CronTimetable {\n\tif m != nil {\n\t\treturn m.Crontimetable\n\t}\n\treturn nil\n}\n\nfunc (m *TaskQueueAddRequest) GetDescription() []byte {\n\tif m != nil {\n\t\treturn m.Description\n\t}\n\treturn nil\n}\n\nfunc (m *TaskQueueAddRequest) GetPayload() *TaskPayload {\n\tif m != nil {\n\t\treturn m.Payload\n\t}\n\treturn nil\n}\n\nfunc (m *TaskQueueAddRequest) GetRetryParameters() *TaskQueueRetryParameters {\n\tif m != nil {\n\t\treturn m.RetryParameters\n\t}\n\treturn nil\n}\n\nfunc (m *TaskQueueAddRequest) GetMode() TaskQueueMode_Mode {\n\tif m != nil && m.Mode != nil {\n\t\treturn *m.Mode\n\t}\n\treturn Default_TaskQueueAddRequest_Mode\n}\n\nfunc (m *TaskQueueAddRequest) GetTag() []byte {\n\tif m != nil {\n\t\treturn m.Tag\n\t}\n\treturn nil\n}\n\ntype TaskQueueAddRequest_Header struct {\n\tKey              []byte `protobuf:\"bytes,7,req,name=key\" json:\"key,omitempty\"`\n\tValue            []byte `protobuf:\"bytes,8,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *TaskQueueAddRequest_Header) Reset()         { *m = TaskQueueAddRequest_Header{} }\nfunc (m *TaskQueueAddRequest_Header) String() string { return proto.CompactTextString(m) }\nfunc (*TaskQueueAddRequest_Header) ProtoMessage()    {}\n\nfunc (m *TaskQueueAddRequest_Header) GetKey() []byte {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *TaskQueueAddRequest_Header) GetValue() []byte {\n\tif m != nil {\n\t\treturn 
m.Value\n\t}\n\treturn nil\n}\n\ntype TaskQueueAddRequest_CronTimetable struct {\n\tSchedule         []byte `protobuf:\"bytes,13,req,name=schedule\" json:\"schedule,omitempty\"`\n\tTimezone         []byte `protobuf:\"bytes,14,req,name=timezone\" json:\"timezone,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *TaskQueueAddRequest_CronTimetable) Reset()         { *m = TaskQueueAddRequest_CronTimetable{} }\nfunc (m *TaskQueueAddRequest_CronTimetable) String() string { return proto.CompactTextString(m) }\nfunc (*TaskQueueAddRequest_CronTimetable) ProtoMessage()    {}\n\nfunc (m *TaskQueueAddRequest_CronTimetable) GetSchedule() []byte {\n\tif m != nil {\n\t\treturn m.Schedule\n\t}\n\treturn nil\n}\n\nfunc (m *TaskQueueAddRequest_CronTimetable) GetTimezone() []byte {\n\tif m != nil {\n\t\treturn m.Timezone\n\t}\n\treturn nil\n}\n\ntype TaskQueueAddResponse struct {\n\tChosenTaskName   []byte `protobuf:\"bytes,1,opt,name=chosen_task_name\" json:\"chosen_task_name,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *TaskQueueAddResponse) Reset()         { *m = TaskQueueAddResponse{} }\nfunc (m *TaskQueueAddResponse) String() string { return proto.CompactTextString(m) }\nfunc (*TaskQueueAddResponse) ProtoMessage()    {}\n\nfunc (m *TaskQueueAddResponse) GetChosenTaskName() []byte {\n\tif m != nil {\n\t\treturn m.ChosenTaskName\n\t}\n\treturn nil\n}\n\ntype TaskQueueBulkAddRequest struct {\n\tAddRequest       []*TaskQueueAddRequest `protobuf:\"bytes,1,rep,name=add_request\" json:\"add_request,omitempty\"`\n\tXXX_unrecognized []byte                 `json:\"-\"`\n}\n\nfunc (m *TaskQueueBulkAddRequest) Reset()         { *m = TaskQueueBulkAddRequest{} }\nfunc (m *TaskQueueBulkAddRequest) String() string { return proto.CompactTextString(m) }\nfunc (*TaskQueueBulkAddRequest) ProtoMessage()    {}\n\nfunc (m *TaskQueueBulkAddRequest) GetAddRequest() []*TaskQueueAddRequest {\n\tif m != nil {\n\t\treturn m.AddRequest\n\t}\n\treturn nil\n}\n\ntype 
TaskQueueBulkAddResponse struct {\n\tTaskresult       []*TaskQueueBulkAddResponse_TaskResult `protobuf:\"group,1,rep,name=TaskResult\" json:\"taskresult,omitempty\"`\n\tXXX_unrecognized []byte                                 `json:\"-\"`\n}\n\nfunc (m *TaskQueueBulkAddResponse) Reset()         { *m = TaskQueueBulkAddResponse{} }\nfunc (m *TaskQueueBulkAddResponse) String() string { return proto.CompactTextString(m) }\nfunc (*TaskQueueBulkAddResponse) ProtoMessage()    {}\n\nfunc (m *TaskQueueBulkAddResponse) GetTaskresult() []*TaskQueueBulkAddResponse_TaskResult {\n\tif m != nil {\n\t\treturn m.Taskresult\n\t}\n\treturn nil\n}\n\ntype TaskQueueBulkAddResponse_TaskResult struct {\n\tResult           *TaskQueueServiceError_ErrorCode `protobuf:\"varint,2,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode\" json:\"result,omitempty\"`\n\tChosenTaskName   []byte                           `protobuf:\"bytes,3,opt,name=chosen_task_name\" json:\"chosen_task_name,omitempty\"`\n\tXXX_unrecognized []byte                           `json:\"-\"`\n}\n\nfunc (m *TaskQueueBulkAddResponse_TaskResult) Reset()         { *m = TaskQueueBulkAddResponse_TaskResult{} }\nfunc (m *TaskQueueBulkAddResponse_TaskResult) String() string { return proto.CompactTextString(m) }\nfunc (*TaskQueueBulkAddResponse_TaskResult) ProtoMessage()    {}\n\nfunc (m *TaskQueueBulkAddResponse_TaskResult) GetResult() TaskQueueServiceError_ErrorCode {\n\tif m != nil && m.Result != nil {\n\t\treturn *m.Result\n\t}\n\treturn TaskQueueServiceError_OK\n}\n\nfunc (m *TaskQueueBulkAddResponse_TaskResult) GetChosenTaskName() []byte {\n\tif m != nil {\n\t\treturn m.ChosenTaskName\n\t}\n\treturn nil\n}\n\ntype TaskQueueDeleteRequest struct {\n\tQueueName        []byte   `protobuf:\"bytes,1,req,name=queue_name\" json:\"queue_name,omitempty\"`\n\tTaskName         [][]byte `protobuf:\"bytes,2,rep,name=task_name\" json:\"task_name,omitempty\"`\n\tAppId            []byte   `protobuf:\"bytes,3,opt,name=app_id\" 
json:"app_id,omitempty"`
	XXX_unrecognized []byte   `json:"-"`
}

// NOTE(review): everything in this region is protoc-gen-go generated binding
// code for the App Engine taskqueue service protos — do not hand-edit;
// regenerate from the .proto instead. The pattern throughout: each message
// type implements proto.Message via Reset/String/ProtoMessage, and every
// field gets a nil-safe Get* accessor that dereferences the optional pointer
// field, falling back to the type's zero value (or the declared Default_*
// constant) when the field — or the receiver itself — is nil.

func (m *TaskQueueDeleteRequest) Reset()         { *m = TaskQueueDeleteRequest{} }
func (m *TaskQueueDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*TaskQueueDeleteRequest) ProtoMessage()    {}

func (m *TaskQueueDeleteRequest) GetQueueName() []byte {
	if m != nil {
		return m.QueueName
	}
	return nil
}

func (m *TaskQueueDeleteRequest) GetTaskName() [][]byte {
	if m != nil {
		return m.TaskName
	}
	return nil
}

func (m *TaskQueueDeleteRequest) GetAppId() []byte {
	if m != nil {
		return m.AppId
	}
	return nil
}

// TaskQueueDeleteResponse carries one ErrorCode per task named in the
// corresponding delete request (repeated field 3).
type TaskQueueDeleteResponse struct {
	Result           []TaskQueueServiceError_ErrorCode `protobuf:"varint,3,rep,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
	XXX_unrecognized []byte                            `json:"-"`
}

func (m *TaskQueueDeleteResponse) Reset()         { *m = TaskQueueDeleteResponse{} }
func (m *TaskQueueDeleteResponse) String() string { return proto.CompactTextString(m) }
func (*TaskQueueDeleteResponse) ProtoMessage()    {}

func (m *TaskQueueDeleteResponse) GetResult() []TaskQueueServiceError_ErrorCode {
	if m != nil {
		return m.Result
	}
	return nil
}

// TaskQueueForceRunRequest names a single task by app_id, queue_name and
// task_name.
type TaskQueueForceRunRequest struct {
	AppId            []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
	QueueName        []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
	TaskName         []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueueForceRunRequest) Reset()         { *m = TaskQueueForceRunRequest{} }
func (m *TaskQueueForceRunRequest) String() string { return proto.CompactTextString(m) }
func (*TaskQueueForceRunRequest) ProtoMessage()    {}

func (m *TaskQueueForceRunRequest) GetAppId() []byte {
	if m != nil {
		return m.AppId
	}
	return nil
}

func (m *TaskQueueForceRunRequest) GetQueueName() []byte {
	if m != nil {
		return m.QueueName
	}
	return nil
}

func (m *TaskQueueForceRunRequest) GetTaskName() []byte {
	if m != nil {
		return m.TaskName
	}
	return nil
}

// TaskQueueForceRunResponse holds a single required ErrorCode result.
type TaskQueueForceRunResponse struct {
	Result           *TaskQueueServiceError_ErrorCode `protobuf:"varint,3,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
	XXX_unrecognized []byte                           `json:"-"`
}

func (m *TaskQueueForceRunResponse) Reset()         { *m = TaskQueueForceRunResponse{} }
func (m *TaskQueueForceRunResponse) String() string { return proto.CompactTextString(m) }
func (*TaskQueueForceRunResponse) ProtoMessage()    {}

func (m *TaskQueueForceRunResponse) GetResult() TaskQueueServiceError_ErrorCode {
	if m != nil && m.Result != nil {
		return *m.Result
	}
	return TaskQueueServiceError_OK
}

// TaskQueueUpdateQueueRequest describes the full configuration of a queue
// (rate, capacity, mode, ACL, header overrides) to apply.
type TaskQueueUpdateQueueRequest struct {
	AppId                 []byte                    `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
	QueueName             []byte                    `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
	BucketRefillPerSecond *float64                  `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"`
	BucketCapacity        *int32                    `protobuf:"varint,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"`
	UserSpecifiedRate     *string                   `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"`
	RetryParameters       *TaskQueueRetryParameters `protobuf:"bytes,6,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
	MaxConcurrentRequests *int32                    `protobuf:"varint,7,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"`
	Mode                  *TaskQueueMode_Mode       `protobuf:"varint,8,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
	Acl                   *TaskQueueAcl             `protobuf:"bytes,9,opt,name=acl" json:"acl,omitempty"`
	HeaderOverride        []*TaskQueueHttpHeader    `protobuf:"bytes,10,rep,name=header_override" json:"header_override,omitempty"`
	XXX_unrecognized      []byte                    `json:"-"`
}

func (m *TaskQueueUpdateQueueRequest) Reset()         { *m = TaskQueueUpdateQueueRequest{} }
func (m *TaskQueueUpdateQueueRequest) String() string { return proto.CompactTextString(m) }
func (*TaskQueueUpdateQueueRequest) ProtoMessage()    {}

// Proto-declared default (def=0 on field 8): queues are PUSH mode unless set.
const Default_TaskQueueUpdateQueueRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH

func (m *TaskQueueUpdateQueueRequest) GetAppId() []byte {
	if m != nil {
		return m.AppId
	}
	return nil
}

func (m *TaskQueueUpdateQueueRequest) GetQueueName() []byte {
	if m != nil {
		return m.QueueName
	}
	return nil
}

func (m *TaskQueueUpdateQueueRequest) GetBucketRefillPerSecond() float64 {
	if m != nil && m.BucketRefillPerSecond != nil {
		return *m.BucketRefillPerSecond
	}
	return 0
}

func (m *TaskQueueUpdateQueueRequest) GetBucketCapacity() int32 {
	if m != nil && m.BucketCapacity != nil {
		return *m.BucketCapacity
	}
	return 0
}

func (m *TaskQueueUpdateQueueRequest) GetUserSpecifiedRate() string {
	if m != nil && m.UserSpecifiedRate != nil {
		return *m.UserSpecifiedRate
	}
	return ""
}

func (m *TaskQueueUpdateQueueRequest) GetRetryParameters() *TaskQueueRetryParameters {
	if m != nil {
		return m.RetryParameters
	}
	return nil
}

func (m *TaskQueueUpdateQueueRequest) GetMaxConcurrentRequests() int32 {
	if m != nil && m.MaxConcurrentRequests != nil {
		return *m.MaxConcurrentRequests
	}
	return 0
}

func (m *TaskQueueUpdateQueueRequest) GetMode() TaskQueueMode_Mode {
	if m != nil && m.Mode != nil {
		return *m.Mode
	}
	return Default_TaskQueueUpdateQueueRequest_Mode
}

func (m *TaskQueueUpdateQueueRequest) GetAcl() *TaskQueueAcl {
	if m != nil {
		return m.Acl
	}
	return nil
}

func (m *TaskQueueUpdateQueueRequest) GetHeaderOverride() []*TaskQueueHttpHeader {
	if m != nil {
		return m.HeaderOverride
	}
	return nil
}

// TaskQueueUpdateQueueResponse is an empty acknowledgement message.
type TaskQueueUpdateQueueResponse struct {
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueueUpdateQueueResponse) Reset()         { *m = TaskQueueUpdateQueueResponse{} }
func (m *TaskQueueUpdateQueueResponse) String() string { return proto.CompactTextString(m) }
func (*TaskQueueUpdateQueueResponse) ProtoMessage()    {}

// TaskQueueFetchQueuesRequest asks for up to MaxRows queue configurations.
type TaskQueueFetchQueuesRequest struct {
	AppId            []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
	MaxRows          *int32 `protobuf:"varint,2,req,name=max_rows" json:"max_rows,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueueFetchQueuesRequest) Reset()         { *m = TaskQueueFetchQueuesRequest{} }
func (m *TaskQueueFetchQueuesRequest) String() string { return proto.CompactTextString(m) }
func (*TaskQueueFetchQueuesRequest) ProtoMessage()    {}

func (m *TaskQueueFetchQueuesRequest) GetAppId() []byte {
	if m != nil {
		return m.AppId
	}
	return nil
}

func (m *TaskQueueFetchQueuesRequest) GetMaxRows() int32 {
	if m != nil && m.MaxRows != nil {
		return *m.MaxRows
	}
	return 0
}

// TaskQueueFetchQueuesResponse wraps a repeated proto2 group of Queue entries.
type TaskQueueFetchQueuesResponse struct {
	Queue            []*TaskQueueFetchQueuesResponse_Queue `protobuf:"group,1,rep,name=Queue" json:"queue,omitempty"`
	XXX_unrecognized []byte                                `json:"-"`
}

func (m *TaskQueueFetchQueuesResponse) Reset()         { *m = TaskQueueFetchQueuesResponse{} }
func (m *TaskQueueFetchQueuesResponse) String() string { return proto.CompactTextString(m) }
func (*TaskQueueFetchQueuesResponse) ProtoMessage()    {}

func (m *TaskQueueFetchQueuesResponse) GetQueue() []*TaskQueueFetchQueuesResponse_Queue {
	if m != nil {
		return m.Queue
	}
	return nil
}

// TaskQueueFetchQueuesResponse_Queue is one queue's configuration. Note
// BucketCapacity is fixed64 (float64) here, unlike the int32 capacity in
// TaskQueueUpdateQueueRequest.
type TaskQueueFetchQueuesResponse_Queue struct {
	QueueName             []byte                    `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
	BucketRefillPerSecond *float64                  `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"`
	BucketCapacity        *float64                  `protobuf:"fixed64,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"`
	UserSpecifiedRate     *string                   `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"`
	Paused                *bool                     `protobuf:"varint,6,req,name=paused,def=0" json:"paused,omitempty"`
	RetryParameters       *TaskQueueRetryParameters `protobuf:"bytes,7,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
	MaxConcurrentRequests *int32                    `protobuf:"varint,8,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"`
	Mode                  *TaskQueueMode_Mode       `protobuf:"varint,9,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
	Acl                   *TaskQueueAcl             `protobuf:"bytes,10,opt,name=acl" json:"acl,omitempty"`
	HeaderOverride        []*TaskQueueHttpHeader    `protobuf:"bytes,11,rep,name=header_override" json:"header_override,omitempty"`
	CreatorName           *string                   `protobuf:"bytes,12,opt,name=creator_name,def=apphosting" json:"creator_name,omitempty"`
	XXX_unrecognized      []byte                    `json:"-"`
}

func (m *TaskQueueFetchQueuesResponse_Queue) Reset()         { *m = TaskQueueFetchQueuesResponse_Queue{} }
func (m *TaskQueueFetchQueuesResponse_Queue) String() string { return proto.CompactTextString(m) }
func (*TaskQueueFetchQueuesResponse_Queue) ProtoMessage()    {}

// Proto-declared defaults for optional/defaulted fields of the Queue group.
const Default_TaskQueueFetchQueuesResponse_Queue_Paused bool = false
const Default_TaskQueueFetchQueuesResponse_Queue_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
const Default_TaskQueueFetchQueuesResponse_Queue_CreatorName string = "apphosting"

func (m *TaskQueueFetchQueuesResponse_Queue) GetQueueName() []byte {
	if m != nil {
		return m.QueueName
	}
	return nil
}

func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketRefillPerSecond() float64 {
	if m != nil && m.BucketRefillPerSecond != nil {
		return *m.BucketRefillPerSecond
	}
	return 0
}

func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketCapacity() float64 {
	if m != nil && m.BucketCapacity != nil {
		return *m.BucketCapacity
	}
	return 0
}

func (m *TaskQueueFetchQueuesResponse_Queue) GetUserSpecifiedRate() string {
	if m != nil && m.UserSpecifiedRate != nil {
		return *m.UserSpecifiedRate
	}
	return ""
}

func (m *TaskQueueFetchQueuesResponse_Queue) GetPaused() bool {
	if m != nil && m.Paused != nil {
		return *m.Paused
	}
	return Default_TaskQueueFetchQueuesResponse_Queue_Paused
}

func (m *TaskQueueFetchQueuesResponse_Queue) GetRetryParameters() *TaskQueueRetryParameters {
	if m != nil {
		return m.RetryParameters
	}
	return nil
}

func (m *TaskQueueFetchQueuesResponse_Queue) GetMaxConcurrentRequests() int32 {
	if m != nil && m.MaxConcurrentRequests != nil {
		return *m.MaxConcurrentRequests
	}
	return 0
}

func (m *TaskQueueFetchQueuesResponse_Queue) GetMode() TaskQueueMode_Mode {
	if m != nil && m.Mode != nil {
		return *m.Mode
	}
	return Default_TaskQueueFetchQueuesResponse_Queue_Mode
}

func (m *TaskQueueFetchQueuesResponse_Queue) GetAcl() *TaskQueueAcl {
	if m != nil {
		return m.Acl
	}
	return nil
}

func (m *TaskQueueFetchQueuesResponse_Queue) GetHeaderOverride() []*TaskQueueHttpHeader {
	if m != nil {
		return m.HeaderOverride
	}
	return nil
}

func (m *TaskQueueFetchQueuesResponse_Queue) GetCreatorName() string {
	if m != nil && m.CreatorName != nil {
		return *m.CreatorName
	}
	return Default_TaskQueueFetchQueuesResponse_Queue_CreatorName
}

// TaskQueueFetchQueueStatsRequest asks for stats of the named queues,
// sampling at most MaxNumTasks tasks (default 0).
type TaskQueueFetchQueueStatsRequest struct {
	AppId            []byte   `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
	QueueName        [][]byte `protobuf:"bytes,2,rep,name=queue_name" json:"queue_name,omitempty"`
	MaxNumTasks      *int32   `protobuf:"varint,3,opt,name=max_num_tasks,def=0" json:"max_num_tasks,omitempty"`
	XXX_unrecognized []byte   `json:"-"`
}

func (m *TaskQueueFetchQueueStatsRequest) Reset()         { *m = TaskQueueFetchQueueStatsRequest{} }
func (m *TaskQueueFetchQueueStatsRequest) String() string { return proto.CompactTextString(m) }
func (*TaskQueueFetchQueueStatsRequest) ProtoMessage()    {}

const Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks int32 = 0

func (m *TaskQueueFetchQueueStatsRequest) GetAppId() []byte {
	if m != nil {
		return m.AppId
	}
	return nil
}

func (m *TaskQueueFetchQueueStatsRequest) GetQueueName() [][]byte {
	if m != nil {
		return m.QueueName
	}
	return nil
}

func (m *TaskQueueFetchQueueStatsRequest) GetMaxNumTasks() int32 {
	if m != nil && m.MaxNumTasks != nil {
		return *m.MaxNumTasks
	}
	return Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks
}

// TaskQueueScannerQueueInfo carries execution-rate statistics for a queue.
type TaskQueueScannerQueueInfo struct {
	ExecutedLastMinute      *int64   `protobuf:"varint,1,req,name=executed_last_minute" json:"executed_last_minute,omitempty"`
	ExecutedLastHour        *int64   `protobuf:"varint,2,req,name=executed_last_hour" json:"executed_last_hour,omitempty"`
	SamplingDurationSeconds *float64 `protobuf:"fixed64,3,req,name=sampling_duration_seconds" json:"sampling_duration_seconds,omitempty"`
	RequestsInFlight        *int32   `protobuf:"varint,4,opt,name=requests_in_flight" json:"requests_in_flight,omitempty"`
	EnforcedRate            *float64 `protobuf:"fixed64,5,opt,name=enforced_rate" json:"enforced_rate,omitempty"`
	XXX_unrecognized        []byte   `json:"-"`
}

func (m *TaskQueueScannerQueueInfo) Reset()         { *m = TaskQueueScannerQueueInfo{} }
func (m *TaskQueueScannerQueueInfo) String() string { return proto.CompactTextString(m) }
func (*TaskQueueScannerQueueInfo) ProtoMessage()    {}

func (m *TaskQueueScannerQueueInfo) GetExecutedLastMinute() int64 {
	if m != nil && m.ExecutedLastMinute != nil {
		return *m.ExecutedLastMinute
	}
	return 0
}

func (m *TaskQueueScannerQueueInfo) GetExecutedLastHour() int64 {
	if m != nil && m.ExecutedLastHour != nil {
		return *m.ExecutedLastHour
	}
	return 0
}

func (m *TaskQueueScannerQueueInfo) GetSamplingDurationSeconds() float64 {
	if m != nil && m.SamplingDurationSeconds != nil {
		return *m.SamplingDurationSeconds
	}
	return 0
}

func (m *TaskQueueScannerQueueInfo) GetRequestsInFlight() int32 {
	if m != nil && m.RequestsInFlight != nil {
		return *m.RequestsInFlight
	}
	return 0
}

func (m *TaskQueueScannerQueueInfo) GetEnforcedRate() float64 {
	if m != nil && m.EnforcedRate != nil {
		return *m.EnforcedRate
	}
	return 0
}

// TaskQueueFetchQueueStatsResponse wraps a repeated QueueStats group.
type TaskQueueFetchQueueStatsResponse struct {
	Queuestats       []*TaskQueueFetchQueueStatsResponse_QueueStats `protobuf:"group,1,rep,name=QueueStats" json:"queuestats,omitempty"`
	XXX_unrecognized []byte                                         `json:"-"`
}

func (m *TaskQueueFetchQueueStatsResponse) Reset()         { *m = TaskQueueFetchQueueStatsResponse{} }
func (m *TaskQueueFetchQueueStatsResponse) String() string { return proto.CompactTextString(m) }
func (*TaskQueueFetchQueueStatsResponse) ProtoMessage()    {}

func (m *TaskQueueFetchQueueStatsResponse) GetQueuestats() []*TaskQueueFetchQueueStatsResponse_QueueStats {
	if m != nil {
		return m.Queuestats
	}
	return nil
}

// TaskQueueFetchQueueStatsResponse_QueueStats is one queue's task count,
// oldest ETA, and optional scanner info.
type TaskQueueFetchQueueStatsResponse_QueueStats struct {
	NumTasks         *int32                     `protobuf:"varint,2,req,name=num_tasks" json:"num_tasks,omitempty"`
	OldestEtaUsec    *int64                     `protobuf:"varint,3,req,name=oldest_eta_usec" json:"oldest_eta_usec,omitempty"`
	ScannerInfo      *TaskQueueScannerQueueInfo `protobuf:"bytes,4,opt,name=scanner_info" json:"scanner_info,omitempty"`
	XXX_unrecognized []byte                     `json:"-"`
}

func (m *TaskQueueFetchQueueStatsResponse_QueueStats) Reset() {
	*m = TaskQueueFetchQueueStatsResponse_QueueStats{}
}
func (m *TaskQueueFetchQueueStatsResponse_QueueStats) String() string {
	return proto.CompactTextString(m)
}
func (*TaskQueueFetchQueueStatsResponse_QueueStats) ProtoMessage() {}

func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetNumTasks() int32 {
	if m != nil && m.NumTasks != nil {
		return *m.NumTasks
	}
	return 0
}

func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetOldestEtaUsec() int64 {
	if m != nil && m.OldestEtaUsec != nil {
		return *m.OldestEtaUsec
	}
	return 0
}

func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetScannerInfo() *TaskQueueScannerQueueInfo {
	if m != nil {
		return m.ScannerInfo
	}
	return nil
}

// TaskQueuePauseQueueRequest pauses (Pause=true) or resumes a named queue.
type TaskQueuePauseQueueRequest struct {
	AppId            []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
	QueueName        []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
	Pause            *bool  `protobuf:"varint,3,req,name=pause" json:"pause,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueuePauseQueueRequest) Reset()         { *m = TaskQueuePauseQueueRequest{} }
func (m *TaskQueuePauseQueueRequest) String() string { return proto.CompactTextString(m) }
func (*TaskQueuePauseQueueRequest) ProtoMessage()    {}

func (m *TaskQueuePauseQueueRequest) GetAppId() []byte {
	if m != nil {
		return m.AppId
	}
	return nil
}

func (m *TaskQueuePauseQueueRequest) GetQueueName() []byte {
	if m != nil {
		return m.QueueName
	}
	return nil
}

func (m *TaskQueuePauseQueueRequest) GetPause() bool {
	if m != nil && m.Pause != nil {
		return *m.Pause
	}
	return false
}

// TaskQueuePauseQueueResponse is an empty acknowledgement message.
type TaskQueuePauseQueueResponse struct {
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueuePauseQueueResponse) Reset()         { *m = TaskQueuePauseQueueResponse{} }
func (m *TaskQueuePauseQueueResponse) String() string { return proto.CompactTextString(m) }
func (*TaskQueuePauseQueueResponse) ProtoMessage()    {}

// TaskQueuePurgeQueueRequest names a queue whose tasks should be purged.
type TaskQueuePurgeQueueRequest struct {
	AppId            []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
	QueueName        []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueuePurgeQueueRequest) Reset()         { *m = TaskQueuePurgeQueueRequest{} }
func (m *TaskQueuePurgeQueueRequest) String() string { return proto.CompactTextString(m) }
func (*TaskQueuePurgeQueueRequest) ProtoMessage()    {}

func (m *TaskQueuePurgeQueueRequest) GetAppId() []byte {
	if m != nil {
		return m.AppId
	}
	return nil
}

func (m *TaskQueuePurgeQueueRequest) GetQueueName() []byte {
	if m != nil {
		return m.QueueName
	}
	return nil
}

// TaskQueuePurgeQueueResponse is an empty acknowledgement message.
type TaskQueuePurgeQueueResponse struct {
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueuePurgeQueueResponse) Reset()         { *m = TaskQueuePurgeQueueResponse{} }
func (m *TaskQueuePurgeQueueResponse) String() string { return proto.CompactTextString(m) }
func (*TaskQueuePurgeQueueResponse) ProtoMessage()    {}

// TaskQueueDeleteQueueRequest names a queue to delete entirely.
type TaskQueueDeleteQueueRequest struct {
	AppId            []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
	QueueName        []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueueDeleteQueueRequest) Reset()         { *m = TaskQueueDeleteQueueRequest{} }
func (m *TaskQueueDeleteQueueRequest) String() string { return proto.CompactTextString(m) }
func (*TaskQueueDeleteQueueRequest) ProtoMessage()    {}

func (m *TaskQueueDeleteQueueRequest) GetAppId() []byte {
	if m != nil {
		return m.AppId
	}
	return nil
}

func (m *TaskQueueDeleteQueueRequest) GetQueueName() []byte {
	if m != nil {
		return m.QueueName
	}
	return nil
}

// TaskQueueDeleteQueueResponse is an empty acknowledgement message.
type TaskQueueDeleteQueueResponse struct {
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueueDeleteQueueResponse) Reset()         { *m = TaskQueueDeleteQueueResponse{} }
func (m *TaskQueueDeleteQueueResponse) String() string { return proto.CompactTextString(m) }
func (*TaskQueueDeleteQueueResponse) ProtoMessage()    {}

// TaskQueueDeleteGroupRequest names an app whose queue group is deleted.
type TaskQueueDeleteGroupRequest struct {
	AppId            []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueueDeleteGroupRequest) Reset()         { *m = TaskQueueDeleteGroupRequest{} }
func (m *TaskQueueDeleteGroupRequest) String() string { return proto.CompactTextString(m) }
func (*TaskQueueDeleteGroupRequest) ProtoMessage()    {}

func (m *TaskQueueDeleteGroupRequest) GetAppId() []byte {
	if m != nil {
		return m.AppId
	}
	return nil
}

// TaskQueueDeleteGroupResponse is an empty acknowledgement message.
type TaskQueueDeleteGroupResponse struct {
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueueDeleteGroupResponse) Reset()         { *m = TaskQueueDeleteGroupResponse{} }
func (m *TaskQueueDeleteGroupResponse) String() string { return proto.CompactTextString(m) }
func (*TaskQueueDeleteGroupResponse) ProtoMessage()    {}

// TaskQueueQueryTasksRequest pages through a queue's tasks starting at an
// optional (task name, ETA, tag) position; MaxRows defaults to 1.
type TaskQueueQueryTasksRequest struct {
	AppId            []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
	QueueName        []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
	StartTaskName    []byte `protobuf:"bytes,3,opt,name=start_task_name" json:"start_task_name,omitempty"`
	StartEtaUsec     *int64 `protobuf:"varint,4,opt,name=start_eta_usec" json:"start_eta_usec,omitempty"`
	StartTag         []byte `protobuf:"bytes,6,opt,name=start_tag" json:"start_tag,omitempty"`
	MaxRows          *int32 `protobuf:"varint,5,opt,name=max_rows,def=1" json:"max_rows,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueueQueryTasksRequest) Reset()         { *m = TaskQueueQueryTasksRequest{} }
func (m *TaskQueueQueryTasksRequest) String() string { return proto.CompactTextString(m) }
func (*TaskQueueQueryTasksRequest) ProtoMessage()    {}

const Default_TaskQueueQueryTasksRequest_MaxRows int32 = 1

func (m *TaskQueueQueryTasksRequest) GetAppId() []byte {
	if m != nil {
		return m.AppId
	}
	return nil
}

func (m *TaskQueueQueryTasksRequest) GetQueueName() []byte {
	if m != nil {
		return m.QueueName
	}
	return nil
}

func (m *TaskQueueQueryTasksRequest) GetStartTaskName() []byte {
	if m != nil {
		return m.StartTaskName
	}
	return nil
}

func (m *TaskQueueQueryTasksRequest) GetStartEtaUsec() int64 {
	if m != nil && m.StartEtaUsec != nil {
		return *m.StartEtaUsec
	}
	return 0
}

func (m *TaskQueueQueryTasksRequest) GetStartTag() []byte {
	if m != nil {
		return m.StartTag
	}
	return nil
}

func (m *TaskQueueQueryTasksRequest) GetMaxRows() int32 {
	if m != nil && m.MaxRows != nil {
		return *m.MaxRows
	}
	return Default_TaskQueueQueryTasksRequest_MaxRows
}

// TaskQueueQueryTasksResponse wraps a repeated Task group.
type TaskQueueQueryTasksResponse struct {
	Task             []*TaskQueueQueryTasksResponse_Task `protobuf:"group,1,rep,name=Task" json:"task,omitempty"`
	XXX_unrecognized []byte                              `json:"-"`
}

func (m *TaskQueueQueryTasksResponse) Reset()         { *m = TaskQueueQueryTasksResponse{} }
func (m *TaskQueueQueryTasksResponse) String() string { return proto.CompactTextString(m) }
func (*TaskQueueQueryTasksResponse) ProtoMessage()    {}

func (m *TaskQueueQueryTasksResponse) GetTask() []*TaskQueueQueryTasksResponse_Task {
	if m != nil {
		return m.Task
	}
	return nil
}

// TaskQueueQueryTasksResponse_Task is a full task record: identity, schedule,
// HTTP request details (method/headers/body), retry state and run log.
type TaskQueueQueryTasksResponse_Task struct {
	TaskName         []byte                                          `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
	EtaUsec          *int64                                          `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
	Url              []byte                                          `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"`
	Method           *TaskQueueQueryTasksResponse_Task_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueQueryTasksResponse_Task_RequestMethod" json:"method,omitempty"`
	RetryCount       *int32                                          `protobuf:"varint,6,opt,name=retry_count,def=0" json:"retry_count,omitempty"`
	Header           []*TaskQueueQueryTasksResponse_Task_Header      `protobuf:"group,7,rep,name=Header" json:"header,omitempty"`
	BodySize         *int32                                          `protobuf:"varint,10,opt,name=body_size" json:"body_size,omitempty"`
	Body             []byte                                          `protobuf:"bytes,11,opt,name=body" json:"body,omitempty"`
	CreationTimeUsec *int64                                          `protobuf:"varint,12,req,name=creation_time_usec" json:"creation_time_usec,omitempty"`
	Crontimetable    *TaskQueueQueryTasksResponse_Task_CronTimetable `protobuf:"group,13,opt,name=CronTimetable" json:"crontimetable,omitempty"`
	Runlog           *TaskQueueQueryTasksResponse_Task_RunLog        `protobuf:"group,16,opt,name=RunLog" json:"runlog,omitempty"`
	Description      []byte                                          `protobuf:"bytes,21,opt,name=description" json:"description,omitempty"`
	Payload          *TaskPayload                                    `protobuf:"bytes,22,opt,name=payload" json:"payload,omitempty"`
	RetryParameters  *TaskQueueRetryParameters                       `protobuf:"bytes,23,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
	FirstTryUsec     *int64                                          `protobuf:"varint,24,opt,name=first_try_usec" json:"first_try_usec,omitempty"`
	Tag              []byte                                          `protobuf:"bytes,25,opt,name=tag" json:"tag,omitempty"`
	ExecutionCount   *int32                                          `protobuf:"varint,26,opt,name=execution_count,def=0" json:"execution_count,omitempty"`
	XXX_unrecognized []byte                                          `json:"-"`
}

func (m *TaskQueueQueryTasksResponse_Task) Reset()         { *m = TaskQueueQueryTasksResponse_Task{} }
func (m *TaskQueueQueryTasksResponse_Task) String() string { return proto.CompactTextString(m) }
func (*TaskQueueQueryTasksResponse_Task) ProtoMessage()    {}

const Default_TaskQueueQueryTasksResponse_Task_RetryCount int32 = 0
const Default_TaskQueueQueryTasksResponse_Task_ExecutionCount int32 = 0

func (m *TaskQueueQueryTasksResponse_Task) GetTaskName() []byte {
	if m != nil {
		return m.TaskName
	}
	return nil
}

func (m *TaskQueueQueryTasksResponse_Task) GetEtaUsec() int64 {
	if m != nil && m.EtaUsec != nil {
		return *m.EtaUsec
	}
	return 0
}

func (m *TaskQueueQueryTasksResponse_Task) GetUrl() []byte {
	if m != nil {
		return m.Url
	}
	return nil
}

func (m *TaskQueueQueryTasksResponse_Task) GetMethod() TaskQueueQueryTasksResponse_Task_RequestMethod {
	if m != nil && m.Method != nil {
		return *m.Method
	}
	return TaskQueueQueryTasksResponse_Task_GET
}

func (m *TaskQueueQueryTasksResponse_Task) GetRetryCount() int32 {
	if m != nil && m.RetryCount != nil {
		return *m.RetryCount
	}
	return Default_TaskQueueQueryTasksResponse_Task_RetryCount
}

func (m *TaskQueueQueryTasksResponse_Task) GetHeader() []*TaskQueueQueryTasksResponse_Task_Header {
	if m != nil {
		return m.Header
	}
	return nil
}

func (m *TaskQueueQueryTasksResponse_Task) GetBodySize() int32 {
	if m != nil && m.BodySize != nil {
		return *m.BodySize
	}
	return 0
}

func (m *TaskQueueQueryTasksResponse_Task) GetBody() []byte {
	if m != nil {
		return m.Body
	}
	return nil
}

func (m *TaskQueueQueryTasksResponse_Task) GetCreationTimeUsec() int64 {
	if m != nil && m.CreationTimeUsec != nil {
		return *m.CreationTimeUsec
	}
	return 0
}

func (m *TaskQueueQueryTasksResponse_Task) GetCrontimetable() *TaskQueueQueryTasksResponse_Task_CronTimetable {
	if m != nil {
		return m.Crontimetable
	}
	return nil
}

func (m *TaskQueueQueryTasksResponse_Task) GetRunlog() *TaskQueueQueryTasksResponse_Task_RunLog {
	if m != nil {
		return m.Runlog
	}
	return nil
}

func (m *TaskQueueQueryTasksResponse_Task) GetDescription() []byte {
	if m != nil {
		return m.Description
	}
	return nil
}

func (m *TaskQueueQueryTasksResponse_Task) GetPayload() *TaskPayload {
	if m != nil {
		return m.Payload
	}
	return nil
}

func (m *TaskQueueQueryTasksResponse_Task) GetRetryParameters() *TaskQueueRetryParameters {
	if m != nil {
		return m.RetryParameters
	}
	return nil
}

func (m *TaskQueueQueryTasksResponse_Task) GetFirstTryUsec() int64 {
	if m != nil && m.FirstTryUsec != nil {
		return *m.FirstTryUsec
	}
	return 0
}

func (m *TaskQueueQueryTasksResponse_Task) GetTag() []byte {
	if m != nil {
		return m.Tag
	}
	return nil
}

func (m *TaskQueueQueryTasksResponse_Task) GetExecutionCount() int32 {
	if m != nil && m.ExecutionCount != nil {
		return *m.ExecutionCount
	}
	return Default_TaskQueueQueryTasksResponse_Task_ExecutionCount
}

// TaskQueueQueryTasksResponse_Task_Header is one HTTP header key/value pair.
type TaskQueueQueryTasksResponse_Task_Header struct {
	Key              []byte `protobuf:"bytes,8,req,name=key" json:"key,omitempty"`
	Value            []byte `protobuf:"bytes,9,req,name=value" json:"value,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueueQueryTasksResponse_Task_Header) Reset() {
	*m = TaskQueueQueryTasksResponse_Task_Header{}
}
func (m *TaskQueueQueryTasksResponse_Task_Header) String() string { return proto.CompactTextString(m) }
func (*TaskQueueQueryTasksResponse_Task_Header) ProtoMessage()    {}

func (m *TaskQueueQueryTasksResponse_Task_Header) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *TaskQueueQueryTasksResponse_Task_Header) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}

// TaskQueueQueryTasksResponse_Task_CronTimetable is a cron schedule plus
// timezone for a cron-driven task.
type TaskQueueQueryTasksResponse_Task_CronTimetable struct {
	Schedule         []byte `protobuf:"bytes,14,req,name=schedule" json:"schedule,omitempty"`
	Timezone         []byte `protobuf:"bytes,15,req,name=timezone" json:"timezone,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) Reset() {
	*m = TaskQueueQueryTasksResponse_Task_CronTimetable{}
}
func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) String() string {
	return proto.CompactTextString(m)
}
func (*TaskQueueQueryTasksResponse_Task_CronTimetable) ProtoMessage() {}

func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetSchedule() []byte {
	if m != nil {
		return m.Schedule
	}
	return nil
}

func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetTimezone() []byte {
	if m != nil {
		return m.Timezone
	}
	return nil
}

// TaskQueueQueryTasksResponse_Task_RunLog records timing and outcome of a
// task's most recent run.
type TaskQueueQueryTasksResponse_Task_RunLog struct {
	DispatchedUsec   *int64  `protobuf:"varint,17,req,name=dispatched_usec" json:"dispatched_usec,omitempty"`
	LagUsec          *int64  `protobuf:"varint,18,req,name=lag_usec" json:"lag_usec,omitempty"`
	ElapsedUsec      *int64  `protobuf:"varint,19,req,name=elapsed_usec" json:"elapsed_usec,omitempty"`
	ResponseCode     *int64  `protobuf:"varint,20,opt,name=response_code" json:"response_code,omitempty"`
	RetryReason      *string `protobuf:"bytes,27,opt,name=retry_reason" json:"retry_reason,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *TaskQueueQueryTasksResponse_Task_RunLog) Reset() {
	*m = TaskQueueQueryTasksResponse_Task_RunLog{}
}
func (m *TaskQueueQueryTasksResponse_Task_RunLog) String() string { return proto.CompactTextString(m) }
func (*TaskQueueQueryTasksResponse_Task_RunLog) ProtoMessage()    {}

func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetDispatchedUsec() int64 {
	if m != nil && m.DispatchedUsec != nil {
		return *m.DispatchedUsec
	}
	return 0
}

func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetLagUsec() int64 {
	if m != nil && m.LagUsec != nil {
		return *m.LagUsec
	}
	return 0
}

func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetElapsedUsec() int64 {
	if m != nil && m.ElapsedUsec != nil {
		return *m.ElapsedUsec
	}
	return 0
}

func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetResponseCode() int64 {
	if m != nil && m.ResponseCode != nil {
		return *m.ResponseCode
	}
	return 0
}

func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetRetryReason() string {
	if m != nil && m.RetryReason != nil {
		return *m.RetryReason
	}
	return ""
}

// TaskQueueFetchTaskRequest names a single task to fetch.
type TaskQueueFetchTaskRequest struct {
	AppId            []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
	QueueName        []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
	TaskName         []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueueFetchTaskRequest) Reset()         { *m = TaskQueueFetchTaskRequest{} }
func (m *TaskQueueFetchTaskRequest) String() string { return proto.CompactTextString(m) }
func (*TaskQueueFetchTaskRequest) ProtoMessage()    {}

func (m *TaskQueueFetchTaskRequest) GetAppId() []byte {
	if m != nil {
		return m.AppId
	}
	return nil
}

func (m *TaskQueueFetchTaskRequest) GetQueueName() []byte {
	if m != nil {
		return m.QueueName
	}
	return nil
}

func (m *TaskQueueFetchTaskRequest) GetTaskName() []byte {
	if m != nil {
		return m.TaskName
	}
	return nil
}

// TaskQueueFetchTaskResponse embeds the fetched task as a (single-entry)
// TaskQueueQueryTasksResponse.
type TaskQueueFetchTaskResponse struct {
	Task             *TaskQueueQueryTasksResponse `protobuf:"bytes,1,req,name=task" json:"task,omitempty"`
	XXX_unrecognized []byte                       `json:"-"`
}

func (m *TaskQueueFetchTaskResponse) Reset()         { *m = TaskQueueFetchTaskResponse{} }
func (m *TaskQueueFetchTaskResponse) String() string { return proto.CompactTextString(m) }
func (*TaskQueueFetchTaskResponse) ProtoMessage()    {}

func (m *TaskQueueFetchTaskResponse) GetTask() *TaskQueueQueryTasksResponse {
	if m != nil {
		return m.Task
	}
	return nil
}

// TaskQueueUpdateStorageLimitRequest sets an app's taskqueue storage limit.
type TaskQueueUpdateStorageLimitRequest struct {
	AppId            []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
	Limit            *int64 `protobuf:"varint,2,req,name=limit" json:"limit,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueueUpdateStorageLimitRequest) Reset()         { *m = TaskQueueUpdateStorageLimitRequest{} }
func (m *TaskQueueUpdateStorageLimitRequest) String() string { return proto.CompactTextString(m) }
func (*TaskQueueUpdateStorageLimitRequest) ProtoMessage()    {}

func (m *TaskQueueUpdateStorageLimitRequest) GetAppId() []byte {
	if m != nil {
		return m.AppId
	}
	return nil
}

func (m *TaskQueueUpdateStorageLimitRequest) GetLimit() int64 {
	if m != nil && m.Limit != nil {
		return *m.Limit
	}
	return 0
}

// TaskQueueUpdateStorageLimitResponse returns the limit actually applied.
type TaskQueueUpdateStorageLimitResponse struct {
	NewLimit         *int64 `protobuf:"varint,1,req,name=new_limit" json:"new_limit,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueueUpdateStorageLimitResponse) Reset()         { *m = TaskQueueUpdateStorageLimitResponse{} }
func (m *TaskQueueUpdateStorageLimitResponse) String() string { return proto.CompactTextString(m) }
func (*TaskQueueUpdateStorageLimitResponse) ProtoMessage()    {}

func (m *TaskQueueUpdateStorageLimitResponse) GetNewLimit() int64 {
	if m != nil && m.NewLimit != nil {
		return *m.NewLimit
	}
	return 0
}

// TaskQueueQueryAndOwnTasksRequest leases up to MaxTasks tasks from a pull
// queue for LeaseSeconds, optionally restricted/grouped by tag.
type TaskQueueQueryAndOwnTasksRequest struct {
	QueueName        []byte   `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
	LeaseSeconds     *float64 `protobuf:"fixed64,2,req,name=lease_seconds" json:"lease_seconds,omitempty"`
	MaxTasks         *int64   `protobuf:"varint,3,req,name=max_tasks" json:"max_tasks,omitempty"`
	GroupByTag       *bool    `protobuf:"varint,4,opt,name=group_by_tag,def=0" json:"group_by_tag,omitempty"`
	Tag              []byte   `protobuf:"bytes,5,opt,name=tag" json:"tag,omitempty"`
	XXX_unrecognized []byte   `json:"-"`
}

func (m *TaskQueueQueryAndOwnTasksRequest) Reset()         { *m = TaskQueueQueryAndOwnTasksRequest{} }
func (m *TaskQueueQueryAndOwnTasksRequest) String() string { return proto.CompactTextString(m) }
func (*TaskQueueQueryAndOwnTasksRequest) ProtoMessage()    {}

const Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag bool = false

func (m *TaskQueueQueryAndOwnTasksRequest) GetQueueName() []byte {
	if m != nil {
		return m.QueueName
	}
	return nil
}

func (m *TaskQueueQueryAndOwnTasksRequest) GetLeaseSeconds() float64 {
	if m != nil && m.LeaseSeconds != nil {
		return *m.LeaseSeconds
	}
	return 0
}

func (m *TaskQueueQueryAndOwnTasksRequest) GetMaxTasks() int64 {
	if m != nil && m.MaxTasks != nil {
		return *m.MaxTasks
	}
	return 0
}

func (m *TaskQueueQueryAndOwnTasksRequest) GetGroupByTag() bool {
	if m != nil && m.GroupByTag != nil {
		return *m.GroupByTag
	}
	return Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag
}

func (m *TaskQueueQueryAndOwnTasksRequest) GetTag() []byte {
	if m != nil {
		return m.Tag
	}
	return nil
}

// TaskQueueQueryAndOwnTasksResponse wraps a repeated Task group of leased
// tasks.
type TaskQueueQueryAndOwnTasksResponse struct {
	Task             []*TaskQueueQueryAndOwnTasksResponse_Task `protobuf:"group,1,rep,name=Task" json:"task,omitempty"`
	XXX_unrecognized []byte                                    `json:"-"`
}

func (m *TaskQueueQueryAndOwnTasksResponse) Reset()         { *m = TaskQueueQueryAndOwnTasksResponse{} }
func (m *TaskQueueQueryAndOwnTasksResponse) String() string { return proto.CompactTextString(m) }
func (*TaskQueueQueryAndOwnTasksResponse) ProtoMessage()    {}

func (m *TaskQueueQueryAndOwnTasksResponse) GetTask() []*TaskQueueQueryAndOwnTasksResponse_Task {
	if m != nil {
		return m.Task
	}
	return nil
}

// TaskQueueQueryAndOwnTasksResponse_Task is one leased task: name, lease
// expiry (EtaUsec), retry count, body and tag.
type TaskQueueQueryAndOwnTasksResponse_Task struct {
	TaskName         []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
	EtaUsec          *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
	RetryCount       *int32 `protobuf:"varint,4,opt,name=retry_count,def=0" json:"retry_count,omitempty"`
	Body             []byte `protobuf:"bytes,5,opt,name=body" json:"body,omitempty"`
	Tag              []byte `protobuf:"bytes,6,opt,name=tag" json:"tag,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *TaskQueueQueryAndOwnTasksResponse_Task) Reset() {
	*m = TaskQueueQueryAndOwnTasksResponse_Task{}
}
func (m *TaskQueueQueryAndOwnTasksResponse_Task) String() string { return proto.CompactTextString(m) }
func (*TaskQueueQueryAndOwnTasksResponse_Task) ProtoMessage()    {}

const Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount int32 = 0

func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTaskName() []byte {
	if m != nil {
		return m.TaskName
	}
	return nil
}

func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetEtaUsec() int64 {
	if m != nil && m.EtaUsec != nil {
		return *m.EtaUsec
	}
	return 0
}

func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetRetryCount() int32 {
	if m != nil && m.RetryCount != nil {
		return *m.RetryCount
	}
	return Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount
}

func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetBody() []byte {
	if m != nil {
		return m.Body
	}
	return nil
}

func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTag() []byte {
	if m != nil {
		return m.Tag
	}
	return nil
}

// TaskQueueModifyTaskLeaseRequest extends or shortens an existing lease;
// EtaUsec must match the current lease for the modification to apply.
type TaskQueueModifyTaskLeaseRequest struct {
	QueueName        []byte   `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
	TaskName         []byte   `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
	EtaUsec          *int64   `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
	LeaseSeconds     *float64 `protobuf:"fixed64,4,req,name=lease_seconds" json:"lease_seconds,omitempty"`
	XXX_unrecognized []byte   `json:"-"`
}

func (m *TaskQueueModifyTaskLeaseRequest) Reset()         { *m = TaskQueueModifyTaskLeaseRequest{} }
func (m *TaskQueueModifyTaskLeaseRequest) String() string { return proto.CompactTextString(m) }
func (*TaskQueueModifyTaskLeaseRequest) ProtoMessage()    {}

func (m *TaskQueueModifyTaskLeaseRequest) GetQueueName() []byte {
	if m != nil {
		return m.QueueName
	}
	return nil
}

func (m *TaskQueueModifyTaskLeaseRequest) GetTaskName() []byte {
	if m != nil {
		return m.TaskName
	}
	return nil
}

func (m *TaskQueueModifyTaskLeaseRequest) GetEtaUsec() int64 
{\n\tif m != nil && m.EtaUsec != nil {\n\t\treturn *m.EtaUsec\n\t}\n\treturn 0\n}\n\nfunc (m *TaskQueueModifyTaskLeaseRequest) GetLeaseSeconds() float64 {\n\tif m != nil && m.LeaseSeconds != nil {\n\t\treturn *m.LeaseSeconds\n\t}\n\treturn 0\n}\n\ntype TaskQueueModifyTaskLeaseResponse struct {\n\tUpdatedEtaUsec   *int64 `protobuf:\"varint,1,req,name=updated_eta_usec\" json:\"updated_eta_usec,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *TaskQueueModifyTaskLeaseResponse) Reset()         { *m = TaskQueueModifyTaskLeaseResponse{} }\nfunc (m *TaskQueueModifyTaskLeaseResponse) String() string { return proto.CompactTextString(m) }\nfunc (*TaskQueueModifyTaskLeaseResponse) ProtoMessage()    {}\n\nfunc (m *TaskQueueModifyTaskLeaseResponse) GetUpdatedEtaUsec() int64 {\n\tif m != nil && m.UpdatedEtaUsec != nil {\n\t\treturn *m.UpdatedEtaUsec\n\t}\n\treturn 0\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"taskqueue\";\n\nimport \"google.golang.org/appengine/internal/datastore/datastore_v3.proto\";\n\npackage appengine;\n\nmessage TaskQueueServiceError {\n  enum ErrorCode {\n    OK = 0;\n    UNKNOWN_QUEUE = 1;\n    TRANSIENT_ERROR = 2;\n    INTERNAL_ERROR = 3;\n    TASK_TOO_LARGE = 4;\n    INVALID_TASK_NAME = 5;\n    INVALID_QUEUE_NAME = 6;\n    INVALID_URL = 7;\n    INVALID_QUEUE_RATE = 8;\n    PERMISSION_DENIED = 9;\n    TASK_ALREADY_EXISTS = 10;\n    TOMBSTONED_TASK = 11;\n    INVALID_ETA = 12;\n    INVALID_REQUEST = 13;\n    UNKNOWN_TASK = 14;\n    TOMBSTONED_QUEUE = 15;\n    DUPLICATE_TASK_NAME = 16;\n    SKIPPED = 17;\n    TOO_MANY_TASKS = 18;\n    INVALID_PAYLOAD = 19;\n    INVALID_RETRY_PARAMETERS = 20;\n    INVALID_QUEUE_MODE = 21;\n    ACL_LOOKUP_ERROR = 22;\n    TRANSACTIONAL_REQUEST_TOO_LARGE = 23;\n    INCORRECT_CREATOR_NAME = 24;\n    TASK_LEASE_EXPIRED = 25;\n    QUEUE_PAUSED = 26;\n    INVALID_TAG = 27;\n\n    // Reserved range for the Datastore error codes.\n    // Original Datastore error code is shifted by DATASTORE_ERROR offset.\n    DATASTORE_ERROR = 10000;\n  }\n}\n\nmessage TaskPayload {\n  extensions 10 to max;\n  option message_set_wire_format = true;\n}\n\nmessage TaskQueueRetryParameters {\n  optional int32 retry_limit = 1;\n  optional int64 age_limit_sec = 2;\n\n  optional double min_backoff_sec = 3 [default = 0.1];\n  optional double max_backoff_sec = 4 [default = 3600];\n  optional int32 max_doublings = 5 [default = 16];\n}\n\nmessage TaskQueueAcl {\n  repeated bytes user_email = 1;\n  repeated bytes writer_email = 2;\n}\n\nmessage TaskQueueHttpHeader {\n  required bytes key = 1;\n  required bytes value = 2;\n}\n\nmessage TaskQueueMode {\n  enum Mode {\n    PUSH = 0;\n    PULL = 1;\n  }\n}\n\nmessage TaskQueueAddRequest {\n  required bytes queue_name = 1;\n  required bytes task_name = 2;\n  required int64 eta_usec = 3;\n\n  enum RequestMethod {\n    GET = 1;\n    POST = 2;\n    
HEAD = 3;\n    PUT = 4;\n    DELETE = 5;\n  }\n  optional RequestMethod method = 5 [default=POST];\n\n  optional bytes url = 4;\n\n  repeated group Header = 6 {\n    required bytes key = 7;\n    required bytes value = 8;\n  }\n\n  optional bytes body = 9 [ctype=CORD];\n  optional Transaction transaction = 10;\n  optional bytes app_id = 11;\n\n  optional group CronTimetable = 12 {\n    required bytes schedule = 13;\n    required bytes timezone = 14;\n  }\n\n  optional bytes description = 15;\n  optional TaskPayload payload = 16;\n  optional TaskQueueRetryParameters retry_parameters = 17;\n  optional TaskQueueMode.Mode mode = 18 [default=PUSH];\n  optional bytes tag = 19;\n}\n\nmessage TaskQueueAddResponse {\n  optional bytes chosen_task_name = 1;\n}\n\nmessage TaskQueueBulkAddRequest {\n  repeated TaskQueueAddRequest add_request = 1;\n}\n\nmessage TaskQueueBulkAddResponse {\n  repeated group TaskResult = 1 {\n    required TaskQueueServiceError.ErrorCode result = 2;\n    optional bytes chosen_task_name = 3;\n  }\n}\n\nmessage TaskQueueDeleteRequest {\n  required bytes queue_name = 1;\n  repeated bytes task_name = 2;\n  optional bytes app_id = 3;\n}\n\nmessage TaskQueueDeleteResponse {\n  repeated TaskQueueServiceError.ErrorCode result = 3;\n}\n\nmessage TaskQueueForceRunRequest {\n  optional bytes app_id = 1;\n  required bytes queue_name = 2;\n  required bytes task_name = 3;\n}\n\nmessage TaskQueueForceRunResponse {\n  required TaskQueueServiceError.ErrorCode result = 3;\n}\n\nmessage TaskQueueUpdateQueueRequest {\n  optional bytes app_id = 1;\n  required bytes queue_name = 2;\n  required double bucket_refill_per_second = 3;\n  required int32 bucket_capacity = 4;\n  optional string user_specified_rate = 5;\n  optional TaskQueueRetryParameters retry_parameters = 6;\n  optional int32 max_concurrent_requests = 7;\n  optional TaskQueueMode.Mode mode = 8 [default = PUSH];\n  optional TaskQueueAcl acl = 9;\n  repeated TaskQueueHttpHeader header_override = 10;\n}\n\nmessage 
TaskQueueUpdateQueueResponse {\n}\n\nmessage TaskQueueFetchQueuesRequest {\n  optional bytes app_id = 1;\n  required int32 max_rows = 2;\n}\n\nmessage TaskQueueFetchQueuesResponse {\n  repeated group Queue = 1 {\n    required bytes queue_name = 2;\n    required double bucket_refill_per_second = 3;\n    required double bucket_capacity = 4;\n    optional string user_specified_rate = 5;\n    required bool paused = 6 [default=false];\n    optional TaskQueueRetryParameters retry_parameters = 7;\n    optional int32 max_concurrent_requests = 8;\n    optional TaskQueueMode.Mode mode = 9 [default = PUSH];\n    optional TaskQueueAcl acl = 10;\n    repeated TaskQueueHttpHeader header_override = 11;\n    optional string creator_name = 12 [ctype=CORD, default=\"apphosting\"];\n  }\n}\n\nmessage TaskQueueFetchQueueStatsRequest {\n  optional bytes app_id = 1;\n  repeated bytes queue_name = 2;\n  optional int32 max_num_tasks = 3 [default = 0];\n}\n\nmessage TaskQueueScannerQueueInfo {\n  required int64 executed_last_minute = 1;\n  required int64 executed_last_hour = 2;\n  required double sampling_duration_seconds = 3;\n  optional int32 requests_in_flight = 4;\n  optional double enforced_rate = 5;\n}\n\nmessage TaskQueueFetchQueueStatsResponse {\n  repeated group QueueStats = 1 {\n    required int32 num_tasks = 2;\n    required int64 oldest_eta_usec = 3;\n    optional TaskQueueScannerQueueInfo scanner_info = 4;\n  }\n}\nmessage TaskQueuePauseQueueRequest {\n  required bytes app_id = 1;\n  required bytes queue_name = 2;\n  required bool pause = 3;\n}\n\nmessage TaskQueuePauseQueueResponse {\n}\n\nmessage TaskQueuePurgeQueueRequest {\n  optional bytes app_id = 1;\n  required bytes queue_name = 2;\n}\n\nmessage TaskQueuePurgeQueueResponse {\n}\n\nmessage TaskQueueDeleteQueueRequest {\n  required bytes app_id = 1;\n  required bytes queue_name = 2;\n}\n\nmessage TaskQueueDeleteQueueResponse {\n}\n\nmessage TaskQueueDeleteGroupRequest {\n  required bytes app_id = 1;\n}\n\nmessage 
TaskQueueDeleteGroupResponse {\n}\n\nmessage TaskQueueQueryTasksRequest {\n  optional bytes app_id = 1;\n  required bytes queue_name = 2;\n\n  optional bytes start_task_name = 3;\n  optional int64 start_eta_usec = 4;\n  optional bytes start_tag = 6;\n  optional int32 max_rows = 5 [default = 1];\n}\n\nmessage TaskQueueQueryTasksResponse {\n  repeated group Task = 1 {\n    required bytes task_name = 2;\n    required int64 eta_usec = 3;\n    optional bytes url = 4;\n\n    enum RequestMethod {\n      GET = 1;\n      POST = 2;\n      HEAD = 3;\n      PUT = 4;\n      DELETE = 5;\n    }\n    optional RequestMethod method = 5;\n\n    optional int32 retry_count = 6 [default=0];\n\n    repeated group Header = 7 {\n      required bytes key = 8;\n      required bytes value = 9;\n    }\n\n    optional int32 body_size = 10;\n    optional bytes body = 11 [ctype=CORD];\n    required int64 creation_time_usec = 12;\n\n    optional group CronTimetable = 13 {\n      required bytes schedule = 14;\n      required bytes timezone = 15;\n    }\n\n    optional group RunLog = 16 {\n      required int64 dispatched_usec = 17;\n      required int64 lag_usec = 18;\n      required int64 elapsed_usec = 19;\n      optional int64 response_code = 20;\n      optional string retry_reason = 27;\n    }\n\n    optional bytes description = 21;\n    optional TaskPayload payload = 22;\n    optional TaskQueueRetryParameters retry_parameters = 23;\n    optional int64 first_try_usec = 24;\n    optional bytes tag = 25;\n    optional int32 execution_count = 26 [default=0];\n  }\n}\n\nmessage TaskQueueFetchTaskRequest {\n  optional bytes app_id = 1;\n  required bytes queue_name = 2;\n  required bytes task_name = 3;\n}\n\nmessage TaskQueueFetchTaskResponse {\n  required TaskQueueQueryTasksResponse task = 1;\n}\n\nmessage TaskQueueUpdateStorageLimitRequest {\n  required bytes app_id = 1;\n  required int64 limit = 2;\n}\n\nmessage TaskQueueUpdateStorageLimitResponse {\n  required int64 new_limit = 1;\n}\n\nmessage 
TaskQueueQueryAndOwnTasksRequest {\n  required bytes queue_name = 1;\n  required double lease_seconds = 2;\n  required int64 max_tasks = 3;\n  optional bool group_by_tag = 4 [default=false];\n  optional bytes tag = 5;\n}\n\nmessage TaskQueueQueryAndOwnTasksResponse {\n  repeated group Task = 1 {\n    required bytes task_name = 2;\n    required int64 eta_usec = 3;\n    optional int32 retry_count = 4 [default=0];\n    optional bytes body = 5 [ctype=CORD];\n    optional bytes tag = 6;\n  }\n}\n\nmessage TaskQueueModifyTaskLeaseRequest {\n  required bytes queue_name = 1;\n  required bytes task_name = 2;\n  required int64 eta_usec = 3;\n  required double lease_seconds = 4;\n}\n\nmessage TaskQueueModifyTaskLeaseResponse {\n  required int64 updated_eta_usec = 1;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/transaction.go",
    "content": "// Copyright 2014 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage internal\n\n// This file implements hooks for applying datastore transactions.\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\n\t\"github.com/golang/protobuf/proto\"\n\tnetcontext \"golang.org/x/net/context\"\n\n\tbasepb \"google.golang.org/appengine/internal/base\"\n\tpb \"google.golang.org/appengine/internal/datastore\"\n)\n\nvar transactionSetters = make(map[reflect.Type]reflect.Value)\n\n// RegisterTransactionSetter registers a function that sets transaction information\n// in a protocol buffer message. f should be a function with two arguments,\n// the first being a protocol buffer type, and the second being *datastore.Transaction.\nfunc RegisterTransactionSetter(f interface{}) {\n\tv := reflect.ValueOf(f)\n\ttransactionSetters[v.Type().In(0)] = v\n}\n\n// applyTransaction applies the transaction t to message pb\n// by using the relevant setter passed to RegisterTransactionSetter.\nfunc applyTransaction(pb proto.Message, t *pb.Transaction) {\n\tv := reflect.ValueOf(pb)\n\tif f, ok := transactionSetters[v.Type()]; ok {\n\t\tf.Call([]reflect.Value{v, reflect.ValueOf(t)})\n\t}\n}\n\nvar transactionKey = \"used for *Transaction\"\n\nfunc transactionFromContext(ctx netcontext.Context) *transaction {\n\tt, _ := ctx.Value(&transactionKey).(*transaction)\n\treturn t\n}\n\nfunc withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {\n\treturn netcontext.WithValue(ctx, &transactionKey, t)\n}\n\ntype transaction struct {\n\ttransaction pb.Transaction\n\tfinished    bool\n}\n\nvar ErrConcurrentTransaction = errors.New(\"internal: concurrent transaction\")\n\nfunc RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error {\n\tif transactionFromContext(c) != nil {\n\t\treturn errors.New(\"nested transactions are not supported\")\n\t}\n\n\t// 
Begin the transaction.\n\tt := &transaction{}\n\treq := &pb.BeginTransactionRequest{\n\t\tApp: proto.String(FullyQualifiedAppID(c)),\n\t}\n\tif xg {\n\t\treq.AllowMultipleEg = proto.Bool(true)\n\t}\n\tif err := Call(c, \"datastore_v3\", \"BeginTransaction\", req, &t.transaction); err != nil {\n\t\treturn err\n\t}\n\n\t// Call f, rolling back the transaction if f returns a non-nil error, or panics.\n\t// The panic is not recovered.\n\tdefer func() {\n\t\tif t.finished {\n\t\t\treturn\n\t\t}\n\t\tt.finished = true\n\t\t// Ignore the error return value, since we are already returning a non-nil\n\t\t// error (or we're panicking).\n\t\tCall(c, \"datastore_v3\", \"Rollback\", &t.transaction, &basepb.VoidProto{})\n\t}()\n\tif err := f(withTransaction(c, t)); err != nil {\n\t\treturn err\n\t}\n\tt.finished = true\n\n\t// Commit the transaction.\n\tres := &pb.CommitResponse{}\n\terr := Call(c, \"datastore_v3\", \"Commit\", &t.transaction, res)\n\tif ae, ok := err.(*APIError); ok {\n\t\t/* TODO: restore this conditional\n\t\tif appengine.IsDevAppServer() {\n\t\t*/\n\t\t// The Python Dev AppServer raises an ApplicationError with error code 2 (which is\n\t\t// Error.CONCURRENT_TRANSACTION) and message \"Concurrency exception.\".\n\t\tif ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == \"ApplicationError: 2 Concurrency exception.\" {\n\t\t\treturn ErrConcurrentTransaction\n\t\t}\n\t\tif ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {\n\t\t\treturn ErrConcurrentTransaction\n\t\t}\n\t}\n\treturn err\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto\n// DO NOT EDIT!\n\n/*\nPackage urlfetch is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/urlfetch/urlfetch_service.proto\n\nIt has these top-level messages:\n\tURLFetchServiceError\n\tURLFetchRequest\n\tURLFetchResponse\n*/\npackage urlfetch\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype URLFetchServiceError_ErrorCode int32\n\nconst (\n\tURLFetchServiceError_OK                       URLFetchServiceError_ErrorCode = 0\n\tURLFetchServiceError_INVALID_URL              URLFetchServiceError_ErrorCode = 1\n\tURLFetchServiceError_FETCH_ERROR              URLFetchServiceError_ErrorCode = 2\n\tURLFetchServiceError_UNSPECIFIED_ERROR        URLFetchServiceError_ErrorCode = 3\n\tURLFetchServiceError_RESPONSE_TOO_LARGE       URLFetchServiceError_ErrorCode = 4\n\tURLFetchServiceError_DEADLINE_EXCEEDED        URLFetchServiceError_ErrorCode = 5\n\tURLFetchServiceError_SSL_CERTIFICATE_ERROR    URLFetchServiceError_ErrorCode = 6\n\tURLFetchServiceError_DNS_ERROR                URLFetchServiceError_ErrorCode = 7\n\tURLFetchServiceError_CLOSED                   URLFetchServiceError_ErrorCode = 8\n\tURLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9\n\tURLFetchServiceError_TOO_MANY_REDIRECTS       URLFetchServiceError_ErrorCode = 10\n\tURLFetchServiceError_MALFORMED_REPLY          URLFetchServiceError_ErrorCode = 11\n\tURLFetchServiceError_CONNECTION_ERROR         URLFetchServiceError_ErrorCode = 12\n)\n\nvar URLFetchServiceError_ErrorCode_name = map[int32]string{\n\t0:  \"OK\",\n\t1:  \"INVALID_URL\",\n\t2:  \"FETCH_ERROR\",\n\t3:  \"UNSPECIFIED_ERROR\",\n\t4:  
\"RESPONSE_TOO_LARGE\",\n\t5:  \"DEADLINE_EXCEEDED\",\n\t6:  \"SSL_CERTIFICATE_ERROR\",\n\t7:  \"DNS_ERROR\",\n\t8:  \"CLOSED\",\n\t9:  \"INTERNAL_TRANSIENT_ERROR\",\n\t10: \"TOO_MANY_REDIRECTS\",\n\t11: \"MALFORMED_REPLY\",\n\t12: \"CONNECTION_ERROR\",\n}\nvar URLFetchServiceError_ErrorCode_value = map[string]int32{\n\t\"OK\":                       0,\n\t\"INVALID_URL\":              1,\n\t\"FETCH_ERROR\":              2,\n\t\"UNSPECIFIED_ERROR\":        3,\n\t\"RESPONSE_TOO_LARGE\":       4,\n\t\"DEADLINE_EXCEEDED\":        5,\n\t\"SSL_CERTIFICATE_ERROR\":    6,\n\t\"DNS_ERROR\":                7,\n\t\"CLOSED\":                   8,\n\t\"INTERNAL_TRANSIENT_ERROR\": 9,\n\t\"TOO_MANY_REDIRECTS\":       10,\n\t\"MALFORMED_REPLY\":          11,\n\t\"CONNECTION_ERROR\":         12,\n}\n\nfunc (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {\n\tp := new(URLFetchServiceError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x URLFetchServiceError_ErrorCode) String() string {\n\treturn proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))\n}\nfunc (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, \"URLFetchServiceError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = URLFetchServiceError_ErrorCode(value)\n\treturn nil\n}\n\ntype URLFetchRequest_RequestMethod int32\n\nconst (\n\tURLFetchRequest_GET    URLFetchRequest_RequestMethod = 1\n\tURLFetchRequest_POST   URLFetchRequest_RequestMethod = 2\n\tURLFetchRequest_HEAD   URLFetchRequest_RequestMethod = 3\n\tURLFetchRequest_PUT    URLFetchRequest_RequestMethod = 4\n\tURLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5\n\tURLFetchRequest_PATCH  URLFetchRequest_RequestMethod = 6\n)\n\nvar URLFetchRequest_RequestMethod_name = map[int32]string{\n\t1: \"GET\",\n\t2: \"POST\",\n\t3: \"HEAD\",\n\t4: \"PUT\",\n\t5: \"DELETE\",\n\t6: \"PATCH\",\n}\nvar 
URLFetchRequest_RequestMethod_value = map[string]int32{\n\t\"GET\":    1,\n\t\"POST\":   2,\n\t\"HEAD\":   3,\n\t\"PUT\":    4,\n\t\"DELETE\": 5,\n\t\"PATCH\":  6,\n}\n\nfunc (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {\n\tp := new(URLFetchRequest_RequestMethod)\n\t*p = x\n\treturn p\n}\nfunc (x URLFetchRequest_RequestMethod) String() string {\n\treturn proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))\n}\nfunc (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, \"URLFetchRequest_RequestMethod\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = URLFetchRequest_RequestMethod(value)\n\treturn nil\n}\n\ntype URLFetchServiceError struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *URLFetchServiceError) Reset()         { *m = URLFetchServiceError{} }\nfunc (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }\nfunc (*URLFetchServiceError) ProtoMessage()    {}\n\ntype URLFetchRequest struct {\n\tMethod                        *URLFetchRequest_RequestMethod `protobuf:\"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod\" json:\"Method,omitempty\"`\n\tUrl                           *string                        `protobuf:\"bytes,2,req,name=Url\" json:\"Url,omitempty\"`\n\tHeader                        []*URLFetchRequest_Header      `protobuf:\"group,3,rep,name=Header\" json:\"header,omitempty\"`\n\tPayload                       []byte                         `protobuf:\"bytes,6,opt,name=Payload\" json:\"Payload,omitempty\"`\n\tFollowRedirects               *bool                          `protobuf:\"varint,7,opt,name=FollowRedirects,def=1\" json:\"FollowRedirects,omitempty\"`\n\tDeadline                      *float64                       `protobuf:\"fixed64,8,opt,name=Deadline\" json:\"Deadline,omitempty\"`\n\tMustValidateServerCertificate *bool                          
`protobuf:\"varint,9,opt,name=MustValidateServerCertificate,def=1\" json:\"MustValidateServerCertificate,omitempty\"`\n\tXXX_unrecognized              []byte                         `json:\"-\"`\n}\n\nfunc (m *URLFetchRequest) Reset()         { *m = URLFetchRequest{} }\nfunc (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }\nfunc (*URLFetchRequest) ProtoMessage()    {}\n\nconst Default_URLFetchRequest_FollowRedirects bool = true\nconst Default_URLFetchRequest_MustValidateServerCertificate bool = true\n\nfunc (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {\n\tif m != nil && m.Method != nil {\n\t\treturn *m.Method\n\t}\n\treturn URLFetchRequest_GET\n}\n\nfunc (m *URLFetchRequest) GetUrl() string {\n\tif m != nil && m.Url != nil {\n\t\treturn *m.Url\n\t}\n\treturn \"\"\n}\n\nfunc (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {\n\tif m != nil {\n\t\treturn m.Header\n\t}\n\treturn nil\n}\n\nfunc (m *URLFetchRequest) GetPayload() []byte {\n\tif m != nil {\n\t\treturn m.Payload\n\t}\n\treturn nil\n}\n\nfunc (m *URLFetchRequest) GetFollowRedirects() bool {\n\tif m != nil && m.FollowRedirects != nil {\n\t\treturn *m.FollowRedirects\n\t}\n\treturn Default_URLFetchRequest_FollowRedirects\n}\n\nfunc (m *URLFetchRequest) GetDeadline() float64 {\n\tif m != nil && m.Deadline != nil {\n\t\treturn *m.Deadline\n\t}\n\treturn 0\n}\n\nfunc (m *URLFetchRequest) GetMustValidateServerCertificate() bool {\n\tif m != nil && m.MustValidateServerCertificate != nil {\n\t\treturn *m.MustValidateServerCertificate\n\t}\n\treturn Default_URLFetchRequest_MustValidateServerCertificate\n}\n\ntype URLFetchRequest_Header struct {\n\tKey              *string `protobuf:\"bytes,4,req,name=Key\" json:\"Key,omitempty\"`\n\tValue            *string `protobuf:\"bytes,5,req,name=Value\" json:\"Value,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *URLFetchRequest_Header) Reset()         { *m = URLFetchRequest_Header{} }\nfunc (m 
*URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }\nfunc (*URLFetchRequest_Header) ProtoMessage()    {}\n\nfunc (m *URLFetchRequest_Header) GetKey() string {\n\tif m != nil && m.Key != nil {\n\t\treturn *m.Key\n\t}\n\treturn \"\"\n}\n\nfunc (m *URLFetchRequest_Header) GetValue() string {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn \"\"\n}\n\ntype URLFetchResponse struct {\n\tContent               []byte                     `protobuf:\"bytes,1,opt,name=Content\" json:\"Content,omitempty\"`\n\tStatusCode            *int32                     `protobuf:\"varint,2,req,name=StatusCode\" json:\"StatusCode,omitempty\"`\n\tHeader                []*URLFetchResponse_Header `protobuf:\"group,3,rep,name=Header\" json:\"header,omitempty\"`\n\tContentWasTruncated   *bool                      `protobuf:\"varint,6,opt,name=ContentWasTruncated,def=0\" json:\"ContentWasTruncated,omitempty\"`\n\tExternalBytesSent     *int64                     `protobuf:\"varint,7,opt,name=ExternalBytesSent\" json:\"ExternalBytesSent,omitempty\"`\n\tExternalBytesReceived *int64                     `protobuf:\"varint,8,opt,name=ExternalBytesReceived\" json:\"ExternalBytesReceived,omitempty\"`\n\tFinalUrl              *string                    `protobuf:\"bytes,9,opt,name=FinalUrl\" json:\"FinalUrl,omitempty\"`\n\tApiCpuMilliseconds    *int64                     `protobuf:\"varint,10,opt,name=ApiCpuMilliseconds,def=0\" json:\"ApiCpuMilliseconds,omitempty\"`\n\tApiBytesSent          *int64                     `protobuf:\"varint,11,opt,name=ApiBytesSent,def=0\" json:\"ApiBytesSent,omitempty\"`\n\tApiBytesReceived      *int64                     `protobuf:\"varint,12,opt,name=ApiBytesReceived,def=0\" json:\"ApiBytesReceived,omitempty\"`\n\tXXX_unrecognized      []byte                     `json:\"-\"`\n}\n\nfunc (m *URLFetchResponse) Reset()         { *m = URLFetchResponse{} }\nfunc (m *URLFetchResponse) String() string { return 
proto.CompactTextString(m) }\nfunc (*URLFetchResponse) ProtoMessage()    {}\n\nconst Default_URLFetchResponse_ContentWasTruncated bool = false\nconst Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0\nconst Default_URLFetchResponse_ApiBytesSent int64 = 0\nconst Default_URLFetchResponse_ApiBytesReceived int64 = 0\n\nfunc (m *URLFetchResponse) GetContent() []byte {\n\tif m != nil {\n\t\treturn m.Content\n\t}\n\treturn nil\n}\n\nfunc (m *URLFetchResponse) GetStatusCode() int32 {\n\tif m != nil && m.StatusCode != nil {\n\t\treturn *m.StatusCode\n\t}\n\treturn 0\n}\n\nfunc (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {\n\tif m != nil {\n\t\treturn m.Header\n\t}\n\treturn nil\n}\n\nfunc (m *URLFetchResponse) GetContentWasTruncated() bool {\n\tif m != nil && m.ContentWasTruncated != nil {\n\t\treturn *m.ContentWasTruncated\n\t}\n\treturn Default_URLFetchResponse_ContentWasTruncated\n}\n\nfunc (m *URLFetchResponse) GetExternalBytesSent() int64 {\n\tif m != nil && m.ExternalBytesSent != nil {\n\t\treturn *m.ExternalBytesSent\n\t}\n\treturn 0\n}\n\nfunc (m *URLFetchResponse) GetExternalBytesReceived() int64 {\n\tif m != nil && m.ExternalBytesReceived != nil {\n\t\treturn *m.ExternalBytesReceived\n\t}\n\treturn 0\n}\n\nfunc (m *URLFetchResponse) GetFinalUrl() string {\n\tif m != nil && m.FinalUrl != nil {\n\t\treturn *m.FinalUrl\n\t}\n\treturn \"\"\n}\n\nfunc (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {\n\tif m != nil && m.ApiCpuMilliseconds != nil {\n\t\treturn *m.ApiCpuMilliseconds\n\t}\n\treturn Default_URLFetchResponse_ApiCpuMilliseconds\n}\n\nfunc (m *URLFetchResponse) GetApiBytesSent() int64 {\n\tif m != nil && m.ApiBytesSent != nil {\n\t\treturn *m.ApiBytesSent\n\t}\n\treturn Default_URLFetchResponse_ApiBytesSent\n}\n\nfunc (m *URLFetchResponse) GetApiBytesReceived() int64 {\n\tif m != nil && m.ApiBytesReceived != nil {\n\t\treturn *m.ApiBytesReceived\n\t}\n\treturn Default_URLFetchResponse_ApiBytesReceived\n}\n\ntype 
URLFetchResponse_Header struct {\n\tKey              *string `protobuf:\"bytes,4,req,name=Key\" json:\"Key,omitempty\"`\n\tValue            *string `protobuf:\"bytes,5,req,name=Value\" json:\"Value,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *URLFetchResponse_Header) Reset()         { *m = URLFetchResponse_Header{} }\nfunc (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }\nfunc (*URLFetchResponse_Header) ProtoMessage()    {}\n\nfunc (m *URLFetchResponse_Header) GetKey() string {\n\tif m != nil && m.Key != nil {\n\t\treturn *m.Key\n\t}\n\treturn \"\"\n}\n\nfunc (m *URLFetchResponse_Header) GetValue() string {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn \"\"\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"urlfetch\";\n\npackage appengine;\n\nmessage URLFetchServiceError {\n  enum ErrorCode {\n    OK = 0;\n    INVALID_URL = 1;\n    FETCH_ERROR = 2;\n    UNSPECIFIED_ERROR = 3;\n    RESPONSE_TOO_LARGE = 4;\n    DEADLINE_EXCEEDED = 5;\n    SSL_CERTIFICATE_ERROR = 6;\n    DNS_ERROR = 7;\n    CLOSED = 8;\n    INTERNAL_TRANSIENT_ERROR = 9;\n    TOO_MANY_REDIRECTS = 10;\n    MALFORMED_REPLY = 11;\n    CONNECTION_ERROR = 12;\n  }\n}\n\nmessage URLFetchRequest {\n  enum RequestMethod {\n    GET = 1;\n    POST = 2;\n    HEAD = 3;\n    PUT = 4;\n    DELETE = 5;\n    PATCH = 6;\n  }\n  required RequestMethod Method = 1;\n  required string Url = 2;\n  repeated group Header = 3 {\n    required string Key = 4;\n    required string Value = 5;\n  }\n  optional bytes Payload = 6 [ctype=CORD];\n\n  optional bool FollowRedirects = 7 [default=true];\n\n  optional double Deadline = 8;\n\n  optional bool MustValidateServerCertificate = 9 [default=true];\n}\n\nmessage URLFetchResponse {\n  optional bytes Content = 1;\n  required int32 StatusCode = 2;\n  repeated group Header = 3 {\n    required string Key = 4;\n    required string Value = 5;\n  }\n  optional bool ContentWasTruncated = 6 [default=false];\n  optional int64 ExternalBytesSent = 7;\n  optional int64 ExternalBytesReceived = 8;\n\n  optional string FinalUrl = 9;\n\n  optional int64 ApiCpuMilliseconds = 10 [default=0];\n  optional int64 ApiBytesSent = 11 [default=0];\n  optional int64 ApiBytesReceived = 12 [default=0];\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/user/user_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/user/user_service.proto\n// DO NOT EDIT!\n\n/*\nPackage user is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/user/user_service.proto\n\nIt has these top-level messages:\n\tUserServiceError\n\tCreateLoginURLRequest\n\tCreateLoginURLResponse\n\tCreateLogoutURLRequest\n\tCreateLogoutURLResponse\n\tGetOAuthUserRequest\n\tGetOAuthUserResponse\n\tCheckOAuthSignatureRequest\n\tCheckOAuthSignatureResponse\n*/\npackage user\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype UserServiceError_ErrorCode int32\n\nconst (\n\tUserServiceError_OK                    UserServiceError_ErrorCode = 0\n\tUserServiceError_REDIRECT_URL_TOO_LONG UserServiceError_ErrorCode = 1\n\tUserServiceError_NOT_ALLOWED           UserServiceError_ErrorCode = 2\n\tUserServiceError_OAUTH_INVALID_TOKEN   UserServiceError_ErrorCode = 3\n\tUserServiceError_OAUTH_INVALID_REQUEST UserServiceError_ErrorCode = 4\n\tUserServiceError_OAUTH_ERROR           UserServiceError_ErrorCode = 5\n)\n\nvar UserServiceError_ErrorCode_name = map[int32]string{\n\t0: \"OK\",\n\t1: \"REDIRECT_URL_TOO_LONG\",\n\t2: \"NOT_ALLOWED\",\n\t3: \"OAUTH_INVALID_TOKEN\",\n\t4: \"OAUTH_INVALID_REQUEST\",\n\t5: \"OAUTH_ERROR\",\n}\nvar UserServiceError_ErrorCode_value = map[string]int32{\n\t\"OK\": 0,\n\t\"REDIRECT_URL_TOO_LONG\": 1,\n\t\"NOT_ALLOWED\":           2,\n\t\"OAUTH_INVALID_TOKEN\":   3,\n\t\"OAUTH_INVALID_REQUEST\": 4,\n\t\"OAUTH_ERROR\":           5,\n}\n\nfunc (x UserServiceError_ErrorCode) Enum() *UserServiceError_ErrorCode {\n\tp := new(UserServiceError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x UserServiceError_ErrorCode) String() string {\n\treturn 
proto.EnumName(UserServiceError_ErrorCode_name, int32(x))\n}\nfunc (x *UserServiceError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(UserServiceError_ErrorCode_value, data, \"UserServiceError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = UserServiceError_ErrorCode(value)\n\treturn nil\n}\n\ntype UserServiceError struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *UserServiceError) Reset()         { *m = UserServiceError{} }\nfunc (m *UserServiceError) String() string { return proto.CompactTextString(m) }\nfunc (*UserServiceError) ProtoMessage()    {}\n\ntype CreateLoginURLRequest struct {\n\tDestinationUrl    *string `protobuf:\"bytes,1,req,name=destination_url\" json:\"destination_url,omitempty\"`\n\tAuthDomain        *string `protobuf:\"bytes,2,opt,name=auth_domain\" json:\"auth_domain,omitempty\"`\n\tFederatedIdentity *string `protobuf:\"bytes,3,opt,name=federated_identity,def=\" json:\"federated_identity,omitempty\"`\n\tXXX_unrecognized  []byte  `json:\"-\"`\n}\n\nfunc (m *CreateLoginURLRequest) Reset()         { *m = CreateLoginURLRequest{} }\nfunc (m *CreateLoginURLRequest) String() string { return proto.CompactTextString(m) }\nfunc (*CreateLoginURLRequest) ProtoMessage()    {}\n\nfunc (m *CreateLoginURLRequest) GetDestinationUrl() string {\n\tif m != nil && m.DestinationUrl != nil {\n\t\treturn *m.DestinationUrl\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateLoginURLRequest) GetAuthDomain() string {\n\tif m != nil && m.AuthDomain != nil {\n\t\treturn *m.AuthDomain\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateLoginURLRequest) GetFederatedIdentity() string {\n\tif m != nil && m.FederatedIdentity != nil {\n\t\treturn *m.FederatedIdentity\n\t}\n\treturn \"\"\n}\n\ntype CreateLoginURLResponse struct {\n\tLoginUrl         *string `protobuf:\"bytes,1,req,name=login_url\" json:\"login_url,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CreateLoginURLResponse) Reset()         { *m = 
CreateLoginURLResponse{} }\nfunc (m *CreateLoginURLResponse) String() string { return proto.CompactTextString(m) }\nfunc (*CreateLoginURLResponse) ProtoMessage()    {}\n\nfunc (m *CreateLoginURLResponse) GetLoginUrl() string {\n\tif m != nil && m.LoginUrl != nil {\n\t\treturn *m.LoginUrl\n\t}\n\treturn \"\"\n}\n\ntype CreateLogoutURLRequest struct {\n\tDestinationUrl   *string `protobuf:\"bytes,1,req,name=destination_url\" json:\"destination_url,omitempty\"`\n\tAuthDomain       *string `protobuf:\"bytes,2,opt,name=auth_domain\" json:\"auth_domain,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CreateLogoutURLRequest) Reset()         { *m = CreateLogoutURLRequest{} }\nfunc (m *CreateLogoutURLRequest) String() string { return proto.CompactTextString(m) }\nfunc (*CreateLogoutURLRequest) ProtoMessage()    {}\n\nfunc (m *CreateLogoutURLRequest) GetDestinationUrl() string {\n\tif m != nil && m.DestinationUrl != nil {\n\t\treturn *m.DestinationUrl\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateLogoutURLRequest) GetAuthDomain() string {\n\tif m != nil && m.AuthDomain != nil {\n\t\treturn *m.AuthDomain\n\t}\n\treturn \"\"\n}\n\ntype CreateLogoutURLResponse struct {\n\tLogoutUrl        *string `protobuf:\"bytes,1,req,name=logout_url\" json:\"logout_url,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CreateLogoutURLResponse) Reset()         { *m = CreateLogoutURLResponse{} }\nfunc (m *CreateLogoutURLResponse) String() string { return proto.CompactTextString(m) }\nfunc (*CreateLogoutURLResponse) ProtoMessage()    {}\n\nfunc (m *CreateLogoutURLResponse) GetLogoutUrl() string {\n\tif m != nil && m.LogoutUrl != nil {\n\t\treturn *m.LogoutUrl\n\t}\n\treturn \"\"\n}\n\ntype GetOAuthUserRequest struct {\n\tScope            *string  `protobuf:\"bytes,1,opt,name=scope\" json:\"scope,omitempty\"`\n\tScopes           []string `protobuf:\"bytes,2,rep,name=scopes\" json:\"scopes,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m 
*GetOAuthUserRequest) Reset()         { *m = GetOAuthUserRequest{} }\nfunc (m *GetOAuthUserRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetOAuthUserRequest) ProtoMessage()    {}\n\nfunc (m *GetOAuthUserRequest) GetScope() string {\n\tif m != nil && m.Scope != nil {\n\t\treturn *m.Scope\n\t}\n\treturn \"\"\n}\n\nfunc (m *GetOAuthUserRequest) GetScopes() []string {\n\tif m != nil {\n\t\treturn m.Scopes\n\t}\n\treturn nil\n}\n\ntype GetOAuthUserResponse struct {\n\tEmail            *string  `protobuf:\"bytes,1,req,name=email\" json:\"email,omitempty\"`\n\tUserId           *string  `protobuf:\"bytes,2,req,name=user_id\" json:\"user_id,omitempty\"`\n\tAuthDomain       *string  `protobuf:\"bytes,3,req,name=auth_domain\" json:\"auth_domain,omitempty\"`\n\tUserOrganization *string  `protobuf:\"bytes,4,opt,name=user_organization,def=\" json:\"user_organization,omitempty\"`\n\tIsAdmin          *bool    `protobuf:\"varint,5,opt,name=is_admin,def=0\" json:\"is_admin,omitempty\"`\n\tClientId         *string  `protobuf:\"bytes,6,opt,name=client_id,def=\" json:\"client_id,omitempty\"`\n\tScopes           []string `protobuf:\"bytes,7,rep,name=scopes\" json:\"scopes,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *GetOAuthUserResponse) Reset()         { *m = GetOAuthUserResponse{} }\nfunc (m *GetOAuthUserResponse) String() string { return proto.CompactTextString(m) }\nfunc (*GetOAuthUserResponse) ProtoMessage()    {}\n\nconst Default_GetOAuthUserResponse_IsAdmin bool = false\n\nfunc (m *GetOAuthUserResponse) GetEmail() string {\n\tif m != nil && m.Email != nil {\n\t\treturn *m.Email\n\t}\n\treturn \"\"\n}\n\nfunc (m *GetOAuthUserResponse) GetUserId() string {\n\tif m != nil && m.UserId != nil {\n\t\treturn *m.UserId\n\t}\n\treturn \"\"\n}\n\nfunc (m *GetOAuthUserResponse) GetAuthDomain() string {\n\tif m != nil && m.AuthDomain != nil {\n\t\treturn *m.AuthDomain\n\t}\n\treturn \"\"\n}\n\nfunc (m *GetOAuthUserResponse) 
GetUserOrganization() string {\n\tif m != nil && m.UserOrganization != nil {\n\t\treturn *m.UserOrganization\n\t}\n\treturn \"\"\n}\n\nfunc (m *GetOAuthUserResponse) GetIsAdmin() bool {\n\tif m != nil && m.IsAdmin != nil {\n\t\treturn *m.IsAdmin\n\t}\n\treturn Default_GetOAuthUserResponse_IsAdmin\n}\n\nfunc (m *GetOAuthUserResponse) GetClientId() string {\n\tif m != nil && m.ClientId != nil {\n\t\treturn *m.ClientId\n\t}\n\treturn \"\"\n}\n\nfunc (m *GetOAuthUserResponse) GetScopes() []string {\n\tif m != nil {\n\t\treturn m.Scopes\n\t}\n\treturn nil\n}\n\ntype CheckOAuthSignatureRequest struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *CheckOAuthSignatureRequest) Reset()         { *m = CheckOAuthSignatureRequest{} }\nfunc (m *CheckOAuthSignatureRequest) String() string { return proto.CompactTextString(m) }\nfunc (*CheckOAuthSignatureRequest) ProtoMessage()    {}\n\ntype CheckOAuthSignatureResponse struct {\n\tOauthConsumerKey *string `protobuf:\"bytes,1,req,name=oauth_consumer_key\" json:\"oauth_consumer_key,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CheckOAuthSignatureResponse) Reset()         { *m = CheckOAuthSignatureResponse{} }\nfunc (m *CheckOAuthSignatureResponse) String() string { return proto.CompactTextString(m) }\nfunc (*CheckOAuthSignatureResponse) ProtoMessage()    {}\n\nfunc (m *CheckOAuthSignatureResponse) GetOauthConsumerKey() string {\n\tif m != nil && m.OauthConsumerKey != nil {\n\t\treturn *m.OauthConsumerKey\n\t}\n\treturn \"\"\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/user/user_service.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"user\";\n\npackage appengine;\n\nmessage UserServiceError {\n  enum ErrorCode {\n    OK = 0;\n    REDIRECT_URL_TOO_LONG = 1;\n    NOT_ALLOWED = 2;\n    OAUTH_INVALID_TOKEN = 3;\n    OAUTH_INVALID_REQUEST = 4;\n    OAUTH_ERROR = 5;\n  }\n}\n\nmessage CreateLoginURLRequest {\n  required string destination_url = 1;\n  optional string auth_domain = 2;\n  optional string federated_identity = 3 [default = \"\"];\n}\n\nmessage CreateLoginURLResponse {\n  required string login_url = 1;\n}\n\nmessage CreateLogoutURLRequest {\n  required string destination_url = 1;\n  optional string auth_domain = 2;\n}\n\nmessage CreateLogoutURLResponse {\n  required string logout_url = 1;\n}\n\nmessage GetOAuthUserRequest {\n  optional string scope = 1;\n\n  repeated string scopes = 2;\n}\n\nmessage GetOAuthUserResponse {\n  required string email = 1;\n  required string user_id = 2;\n  required string auth_domain = 3;\n  optional string user_organization = 4 [default = \"\"];\n  optional bool is_admin = 5 [default = false];\n  optional string client_id = 6 [default = \"\"];\n\n  repeated string scopes = 7;\n}\n\nmessage CheckOAuthSignatureRequest {\n}\n\nmessage CheckOAuthSignatureResponse {\n  required string oauth_consumer_key = 1;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/appengine/internal/xmpp/xmpp_service.proto\n// DO NOT EDIT!\n\n/*\nPackage xmpp is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/appengine/internal/xmpp/xmpp_service.proto\n\nIt has these top-level messages:\n\tXmppServiceError\n\tPresenceRequest\n\tPresenceResponse\n\tBulkPresenceRequest\n\tBulkPresenceResponse\n\tXmppMessageRequest\n\tXmppMessageResponse\n\tXmppSendPresenceRequest\n\tXmppSendPresenceResponse\n\tXmppInviteRequest\n\tXmppInviteResponse\n*/\npackage xmpp\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype XmppServiceError_ErrorCode int32\n\nconst (\n\tXmppServiceError_UNSPECIFIED_ERROR    XmppServiceError_ErrorCode = 1\n\tXmppServiceError_INVALID_JID          XmppServiceError_ErrorCode = 2\n\tXmppServiceError_NO_BODY              XmppServiceError_ErrorCode = 3\n\tXmppServiceError_INVALID_XML          XmppServiceError_ErrorCode = 4\n\tXmppServiceError_INVALID_TYPE         XmppServiceError_ErrorCode = 5\n\tXmppServiceError_INVALID_SHOW         XmppServiceError_ErrorCode = 6\n\tXmppServiceError_EXCEEDED_MAX_SIZE    XmppServiceError_ErrorCode = 7\n\tXmppServiceError_APPID_ALIAS_REQUIRED XmppServiceError_ErrorCode = 8\n\tXmppServiceError_NONDEFAULT_MODULE    XmppServiceError_ErrorCode = 9\n)\n\nvar XmppServiceError_ErrorCode_name = map[int32]string{\n\t1: \"UNSPECIFIED_ERROR\",\n\t2: \"INVALID_JID\",\n\t3: \"NO_BODY\",\n\t4: \"INVALID_XML\",\n\t5: \"INVALID_TYPE\",\n\t6: \"INVALID_SHOW\",\n\t7: \"EXCEEDED_MAX_SIZE\",\n\t8: \"APPID_ALIAS_REQUIRED\",\n\t9: \"NONDEFAULT_MODULE\",\n}\nvar XmppServiceError_ErrorCode_value = map[string]int32{\n\t\"UNSPECIFIED_ERROR\":    1,\n\t\"INVALID_JID\":          2,\n\t\"NO_BODY\":              
3,\n\t\"INVALID_XML\":          4,\n\t\"INVALID_TYPE\":         5,\n\t\"INVALID_SHOW\":         6,\n\t\"EXCEEDED_MAX_SIZE\":    7,\n\t\"APPID_ALIAS_REQUIRED\": 8,\n\t\"NONDEFAULT_MODULE\":    9,\n}\n\nfunc (x XmppServiceError_ErrorCode) Enum() *XmppServiceError_ErrorCode {\n\tp := new(XmppServiceError_ErrorCode)\n\t*p = x\n\treturn p\n}\nfunc (x XmppServiceError_ErrorCode) String() string {\n\treturn proto.EnumName(XmppServiceError_ErrorCode_name, int32(x))\n}\nfunc (x *XmppServiceError_ErrorCode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(XmppServiceError_ErrorCode_value, data, \"XmppServiceError_ErrorCode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = XmppServiceError_ErrorCode(value)\n\treturn nil\n}\n\ntype PresenceResponse_SHOW int32\n\nconst (\n\tPresenceResponse_NORMAL         PresenceResponse_SHOW = 0\n\tPresenceResponse_AWAY           PresenceResponse_SHOW = 1\n\tPresenceResponse_DO_NOT_DISTURB PresenceResponse_SHOW = 2\n\tPresenceResponse_CHAT           PresenceResponse_SHOW = 3\n\tPresenceResponse_EXTENDED_AWAY  PresenceResponse_SHOW = 4\n)\n\nvar PresenceResponse_SHOW_name = map[int32]string{\n\t0: \"NORMAL\",\n\t1: \"AWAY\",\n\t2: \"DO_NOT_DISTURB\",\n\t3: \"CHAT\",\n\t4: \"EXTENDED_AWAY\",\n}\nvar PresenceResponse_SHOW_value = map[string]int32{\n\t\"NORMAL\":         0,\n\t\"AWAY\":           1,\n\t\"DO_NOT_DISTURB\": 2,\n\t\"CHAT\":           3,\n\t\"EXTENDED_AWAY\":  4,\n}\n\nfunc (x PresenceResponse_SHOW) Enum() *PresenceResponse_SHOW {\n\tp := new(PresenceResponse_SHOW)\n\t*p = x\n\treturn p\n}\nfunc (x PresenceResponse_SHOW) String() string {\n\treturn proto.EnumName(PresenceResponse_SHOW_name, int32(x))\n}\nfunc (x *PresenceResponse_SHOW) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(PresenceResponse_SHOW_value, data, \"PresenceResponse_SHOW\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = PresenceResponse_SHOW(value)\n\treturn nil\n}\n\ntype 
XmppMessageResponse_XmppMessageStatus int32\n\nconst (\n\tXmppMessageResponse_NO_ERROR    XmppMessageResponse_XmppMessageStatus = 0\n\tXmppMessageResponse_INVALID_JID XmppMessageResponse_XmppMessageStatus = 1\n\tXmppMessageResponse_OTHER_ERROR XmppMessageResponse_XmppMessageStatus = 2\n)\n\nvar XmppMessageResponse_XmppMessageStatus_name = map[int32]string{\n\t0: \"NO_ERROR\",\n\t1: \"INVALID_JID\",\n\t2: \"OTHER_ERROR\",\n}\nvar XmppMessageResponse_XmppMessageStatus_value = map[string]int32{\n\t\"NO_ERROR\":    0,\n\t\"INVALID_JID\": 1,\n\t\"OTHER_ERROR\": 2,\n}\n\nfunc (x XmppMessageResponse_XmppMessageStatus) Enum() *XmppMessageResponse_XmppMessageStatus {\n\tp := new(XmppMessageResponse_XmppMessageStatus)\n\t*p = x\n\treturn p\n}\nfunc (x XmppMessageResponse_XmppMessageStatus) String() string {\n\treturn proto.EnumName(XmppMessageResponse_XmppMessageStatus_name, int32(x))\n}\nfunc (x *XmppMessageResponse_XmppMessageStatus) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(XmppMessageResponse_XmppMessageStatus_value, data, \"XmppMessageResponse_XmppMessageStatus\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = XmppMessageResponse_XmppMessageStatus(value)\n\treturn nil\n}\n\ntype XmppServiceError struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *XmppServiceError) Reset()         { *m = XmppServiceError{} }\nfunc (m *XmppServiceError) String() string { return proto.CompactTextString(m) }\nfunc (*XmppServiceError) ProtoMessage()    {}\n\ntype PresenceRequest struct {\n\tJid              *string `protobuf:\"bytes,1,req,name=jid\" json:\"jid,omitempty\"`\n\tFromJid          *string `protobuf:\"bytes,2,opt,name=from_jid\" json:\"from_jid,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *PresenceRequest) Reset()         { *m = PresenceRequest{} }\nfunc (m *PresenceRequest) String() string { return proto.CompactTextString(m) }\nfunc (*PresenceRequest) ProtoMessage()    {}\n\nfunc (m *PresenceRequest) GetJid() 
string {\n\tif m != nil && m.Jid != nil {\n\t\treturn *m.Jid\n\t}\n\treturn \"\"\n}\n\nfunc (m *PresenceRequest) GetFromJid() string {\n\tif m != nil && m.FromJid != nil {\n\t\treturn *m.FromJid\n\t}\n\treturn \"\"\n}\n\ntype PresenceResponse struct {\n\tIsAvailable      *bool                  `protobuf:\"varint,1,req,name=is_available\" json:\"is_available,omitempty\"`\n\tPresence         *PresenceResponse_SHOW `protobuf:\"varint,2,opt,name=presence,enum=appengine.PresenceResponse_SHOW\" json:\"presence,omitempty\"`\n\tValid            *bool                  `protobuf:\"varint,3,opt,name=valid\" json:\"valid,omitempty\"`\n\tXXX_unrecognized []byte                 `json:\"-\"`\n}\n\nfunc (m *PresenceResponse) Reset()         { *m = PresenceResponse{} }\nfunc (m *PresenceResponse) String() string { return proto.CompactTextString(m) }\nfunc (*PresenceResponse) ProtoMessage()    {}\n\nfunc (m *PresenceResponse) GetIsAvailable() bool {\n\tif m != nil && m.IsAvailable != nil {\n\t\treturn *m.IsAvailable\n\t}\n\treturn false\n}\n\nfunc (m *PresenceResponse) GetPresence() PresenceResponse_SHOW {\n\tif m != nil && m.Presence != nil {\n\t\treturn *m.Presence\n\t}\n\treturn PresenceResponse_NORMAL\n}\n\nfunc (m *PresenceResponse) GetValid() bool {\n\tif m != nil && m.Valid != nil {\n\t\treturn *m.Valid\n\t}\n\treturn false\n}\n\ntype BulkPresenceRequest struct {\n\tJid              []string `protobuf:\"bytes,1,rep,name=jid\" json:\"jid,omitempty\"`\n\tFromJid          *string  `protobuf:\"bytes,2,opt,name=from_jid\" json:\"from_jid,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *BulkPresenceRequest) Reset()         { *m = BulkPresenceRequest{} }\nfunc (m *BulkPresenceRequest) String() string { return proto.CompactTextString(m) }\nfunc (*BulkPresenceRequest) ProtoMessage()    {}\n\nfunc (m *BulkPresenceRequest) GetJid() []string {\n\tif m != nil {\n\t\treturn m.Jid\n\t}\n\treturn nil\n}\n\nfunc (m *BulkPresenceRequest) GetFromJid() string {\n\tif m != 
nil && m.FromJid != nil {\n\t\treturn *m.FromJid\n\t}\n\treturn \"\"\n}\n\ntype BulkPresenceResponse struct {\n\tPresenceResponse []*PresenceResponse `protobuf:\"bytes,1,rep,name=presence_response\" json:\"presence_response,omitempty\"`\n\tXXX_unrecognized []byte              `json:\"-\"`\n}\n\nfunc (m *BulkPresenceResponse) Reset()         { *m = BulkPresenceResponse{} }\nfunc (m *BulkPresenceResponse) String() string { return proto.CompactTextString(m) }\nfunc (*BulkPresenceResponse) ProtoMessage()    {}\n\nfunc (m *BulkPresenceResponse) GetPresenceResponse() []*PresenceResponse {\n\tif m != nil {\n\t\treturn m.PresenceResponse\n\t}\n\treturn nil\n}\n\ntype XmppMessageRequest struct {\n\tJid              []string `protobuf:\"bytes,1,rep,name=jid\" json:\"jid,omitempty\"`\n\tBody             *string  `protobuf:\"bytes,2,req,name=body\" json:\"body,omitempty\"`\n\tRawXml           *bool    `protobuf:\"varint,3,opt,name=raw_xml,def=0\" json:\"raw_xml,omitempty\"`\n\tType             *string  `protobuf:\"bytes,4,opt,name=type,def=chat\" json:\"type,omitempty\"`\n\tFromJid          *string  `protobuf:\"bytes,5,opt,name=from_jid\" json:\"from_jid,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *XmppMessageRequest) Reset()         { *m = XmppMessageRequest{} }\nfunc (m *XmppMessageRequest) String() string { return proto.CompactTextString(m) }\nfunc (*XmppMessageRequest) ProtoMessage()    {}\n\nconst Default_XmppMessageRequest_RawXml bool = false\nconst Default_XmppMessageRequest_Type string = \"chat\"\n\nfunc (m *XmppMessageRequest) GetJid() []string {\n\tif m != nil {\n\t\treturn m.Jid\n\t}\n\treturn nil\n}\n\nfunc (m *XmppMessageRequest) GetBody() string {\n\tif m != nil && m.Body != nil {\n\t\treturn *m.Body\n\t}\n\treturn \"\"\n}\n\nfunc (m *XmppMessageRequest) GetRawXml() bool {\n\tif m != nil && m.RawXml != nil {\n\t\treturn *m.RawXml\n\t}\n\treturn Default_XmppMessageRequest_RawXml\n}\n\nfunc (m *XmppMessageRequest) GetType() string {\n\tif m 
!= nil && m.Type != nil {\n\t\treturn *m.Type\n\t}\n\treturn Default_XmppMessageRequest_Type\n}\n\nfunc (m *XmppMessageRequest) GetFromJid() string {\n\tif m != nil && m.FromJid != nil {\n\t\treturn *m.FromJid\n\t}\n\treturn \"\"\n}\n\ntype XmppMessageResponse struct {\n\tStatus           []XmppMessageResponse_XmppMessageStatus `protobuf:\"varint,1,rep,name=status,enum=appengine.XmppMessageResponse_XmppMessageStatus\" json:\"status,omitempty\"`\n\tXXX_unrecognized []byte                                  `json:\"-\"`\n}\n\nfunc (m *XmppMessageResponse) Reset()         { *m = XmppMessageResponse{} }\nfunc (m *XmppMessageResponse) String() string { return proto.CompactTextString(m) }\nfunc (*XmppMessageResponse) ProtoMessage()    {}\n\nfunc (m *XmppMessageResponse) GetStatus() []XmppMessageResponse_XmppMessageStatus {\n\tif m != nil {\n\t\treturn m.Status\n\t}\n\treturn nil\n}\n\ntype XmppSendPresenceRequest struct {\n\tJid              *string `protobuf:\"bytes,1,req,name=jid\" json:\"jid,omitempty\"`\n\tType             *string `protobuf:\"bytes,2,opt,name=type\" json:\"type,omitempty\"`\n\tShow             *string `protobuf:\"bytes,3,opt,name=show\" json:\"show,omitempty\"`\n\tStatus           *string `protobuf:\"bytes,4,opt,name=status\" json:\"status,omitempty\"`\n\tFromJid          *string `protobuf:\"bytes,5,opt,name=from_jid\" json:\"from_jid,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *XmppSendPresenceRequest) Reset()         { *m = XmppSendPresenceRequest{} }\nfunc (m *XmppSendPresenceRequest) String() string { return proto.CompactTextString(m) }\nfunc (*XmppSendPresenceRequest) ProtoMessage()    {}\n\nfunc (m *XmppSendPresenceRequest) GetJid() string {\n\tif m != nil && m.Jid != nil {\n\t\treturn *m.Jid\n\t}\n\treturn \"\"\n}\n\nfunc (m *XmppSendPresenceRequest) GetType() string {\n\tif m != nil && m.Type != nil {\n\t\treturn *m.Type\n\t}\n\treturn \"\"\n}\n\nfunc (m *XmppSendPresenceRequest) GetShow() string {\n\tif m != nil && 
m.Show != nil {\n\t\treturn *m.Show\n\t}\n\treturn \"\"\n}\n\nfunc (m *XmppSendPresenceRequest) GetStatus() string {\n\tif m != nil && m.Status != nil {\n\t\treturn *m.Status\n\t}\n\treturn \"\"\n}\n\nfunc (m *XmppSendPresenceRequest) GetFromJid() string {\n\tif m != nil && m.FromJid != nil {\n\t\treturn *m.FromJid\n\t}\n\treturn \"\"\n}\n\ntype XmppSendPresenceResponse struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *XmppSendPresenceResponse) Reset()         { *m = XmppSendPresenceResponse{} }\nfunc (m *XmppSendPresenceResponse) String() string { return proto.CompactTextString(m) }\nfunc (*XmppSendPresenceResponse) ProtoMessage()    {}\n\ntype XmppInviteRequest struct {\n\tJid              *string `protobuf:\"bytes,1,req,name=jid\" json:\"jid,omitempty\"`\n\tFromJid          *string `protobuf:\"bytes,2,opt,name=from_jid\" json:\"from_jid,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *XmppInviteRequest) Reset()         { *m = XmppInviteRequest{} }\nfunc (m *XmppInviteRequest) String() string { return proto.CompactTextString(m) }\nfunc (*XmppInviteRequest) ProtoMessage()    {}\n\nfunc (m *XmppInviteRequest) GetJid() string {\n\tif m != nil && m.Jid != nil {\n\t\treturn *m.Jid\n\t}\n\treturn \"\"\n}\n\nfunc (m *XmppInviteRequest) GetFromJid() string {\n\tif m != nil && m.FromJid != nil {\n\t\treturn *m.FromJid\n\t}\n\treturn \"\"\n}\n\ntype XmppInviteResponse struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *XmppInviteResponse) Reset()         { *m = XmppInviteResponse{} }\nfunc (m *XmppInviteResponse) String() string { return proto.CompactTextString(m) }\nfunc (*XmppInviteResponse) ProtoMessage()    {}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto",
    "content": "syntax = \"proto2\";\noption go_package = \"xmpp\";\n\npackage appengine;\n\nmessage XmppServiceError {\n  enum ErrorCode {\n    UNSPECIFIED_ERROR = 1;\n    INVALID_JID = 2;\n    NO_BODY = 3;\n    INVALID_XML = 4;\n    INVALID_TYPE = 5;\n    INVALID_SHOW = 6;\n    EXCEEDED_MAX_SIZE = 7;\n    APPID_ALIAS_REQUIRED = 8;\n    NONDEFAULT_MODULE = 9;\n  }\n}\n\nmessage PresenceRequest {\n  required string jid = 1;\n  optional string from_jid = 2;\n}\n\nmessage PresenceResponse {\n  enum SHOW {\n    NORMAL = 0;\n    AWAY = 1;\n    DO_NOT_DISTURB = 2;\n    CHAT = 3;\n    EXTENDED_AWAY = 4;\n  }\n\n  required bool is_available = 1;\n  optional SHOW presence = 2;\n  optional bool valid = 3;\n}\n\nmessage BulkPresenceRequest {\n  repeated string jid = 1;\n  optional string from_jid = 2;\n}\n\nmessage BulkPresenceResponse {\n  repeated PresenceResponse presence_response = 1;\n}\n\nmessage XmppMessageRequest {\n  repeated string jid = 1;\n  required string body = 2;\n  optional bool raw_xml = 3 [ default = false ];\n  optional string type = 4 [ default = \"chat\" ];\n  optional string from_jid = 5;\n}\n\nmessage XmppMessageResponse {\n  enum XmppMessageStatus {\n    NO_ERROR = 0;\n    INVALID_JID = 1;\n    OTHER_ERROR = 2;\n  }\n\n  repeated XmppMessageStatus status = 1;\n}\n\nmessage XmppSendPresenceRequest {\n  required string jid = 1;\n  optional string type = 2;\n  optional string show = 3;\n  optional string status = 4;\n  optional string from_jid = 5;\n}\n\nmessage XmppSendPresenceResponse {\n}\n\nmessage XmppInviteRequest {\n  required string jid = 1;\n  optional string from_jid = 2;\n}\n\nmessage XmppInviteResponse {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/log/api.go",
    "content": "// Copyright 2015 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage log\n\n// This file implements the logging API.\n\nimport (\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n)\n\n// Debugf formats its arguments according to the format, analogous to fmt.Printf,\n// and records the text as a log message at Debug level. The message will be associated\n// with the request linked with the provided context.\nfunc Debugf(ctx context.Context, format string, args ...interface{}) {\n\tinternal.Logf(ctx, 0, format, args...)\n}\n\n// Infof is like Debugf, but at Info level.\nfunc Infof(ctx context.Context, format string, args ...interface{}) {\n\tinternal.Logf(ctx, 1, format, args...)\n}\n\n// Warningf is like Debugf, but at Warning level.\nfunc Warningf(ctx context.Context, format string, args ...interface{}) {\n\tinternal.Logf(ctx, 2, format, args...)\n}\n\n// Errorf is like Debugf, but at Error level.\nfunc Errorf(ctx context.Context, format string, args ...interface{}) {\n\tinternal.Logf(ctx, 3, format, args...)\n}\n\n// Criticalf is like Debugf, but at Critical level.\nfunc Criticalf(ctx context.Context, format string, args ...interface{}) {\n\tinternal.Logf(ctx, 4, format, args...)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/log/log.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n/*\nPackage log provides the means of querying an application's logs from\nwithin an App Engine application.\n\nExample:\n\tc := appengine.NewContext(r)\n\tquery := &log.Query{\n\t\tAppLogs:  true,\n\t\tVersions: []string{\"1\"},\n\t}\n\n\tfor results := query.Run(c); ; {\n\t\trecord, err := results.Next()\n\t\tif err == log.Done {\n\t\t\tlog.Infof(c, \"Done processing results\")\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Errorf(c, \"Failed to retrieve next log: %v\", err)\n\t\t\tbreak\n\t\t}\n\t\tlog.Infof(c, \"Saw record %v\", record)\n\t}\n*/\npackage log\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/log\"\n)\n\n// Query defines a logs query.\ntype Query struct {\n\t// Start time specifies the earliest log to return (inclusive).\n\tStartTime time.Time\n\n\t// End time specifies the latest log to return (exclusive).\n\tEndTime time.Time\n\n\t// Offset specifies a position within the log stream to resume reading from,\n\t// and should come from a previously returned Record's field of the same name.\n\tOffset []byte\n\n\t// Incomplete controls whether active (incomplete) requests should be included.\n\tIncomplete bool\n\n\t// AppLogs indicates if application-level logs should be included.\n\tAppLogs bool\n\n\t// ApplyMinLevel indicates if MinLevel should be used to filter results.\n\tApplyMinLevel bool\n\n\t// If ApplyMinLevel is true, only logs for requests with at least one\n\t// application log of MinLevel or higher will be returned.\n\tMinLevel int\n\n\t// Versions is the major version IDs whose logs should be retrieved.\n\t// Logs for specific modules can be 
retrieved by the specifying versions\n\t// in the form \"module:version\"; the default module is used if no module\n\t// is specified.\n\tVersions []string\n\n\t// A list of requests to search for instead of a time-based scan. Cannot be\n\t// combined with filtering options such as StartTime, EndTime, Offset,\n\t// Incomplete, ApplyMinLevel, or Versions.\n\tRequestIDs []string\n}\n\n// AppLog represents a single application-level log.\ntype AppLog struct {\n\tTime    time.Time\n\tLevel   int\n\tMessage string\n}\n\n// Record contains all the information for a single web request.\ntype Record struct {\n\tAppID            string\n\tModuleID         string\n\tVersionID        string\n\tRequestID        []byte\n\tIP               string\n\tNickname         string\n\tAppEngineRelease string\n\n\t// The time when this request started.\n\tStartTime time.Time\n\n\t// The time when this request finished.\n\tEndTime time.Time\n\n\t// Opaque cursor into the result stream.\n\tOffset []byte\n\n\t// The time required to process the request.\n\tLatency     time.Duration\n\tMCycles     int64\n\tMethod      string\n\tResource    string\n\tHTTPVersion string\n\tStatus      int32\n\n\t// The size of the request sent back to the client, in bytes.\n\tResponseSize int64\n\tReferrer     string\n\tUserAgent    string\n\tURLMapEntry  string\n\tCombined     string\n\tHost         string\n\n\t// The estimated cost of this request, in dollars.\n\tCost              float64\n\tTaskQueueName     string\n\tTaskName          string\n\tWasLoadingRequest bool\n\tPendingTime       time.Duration\n\tFinished          bool\n\tAppLogs           []AppLog\n\n\t// Mostly-unique identifier for the instance that handled the request if available.\n\tInstanceID string\n}\n\n// Result represents the result of a query.\ntype Result struct {\n\tlogs        []*Record\n\tcontext     context.Context\n\trequest     *pb.LogReadRequest\n\tresultsSeen bool\n\terr         error\n}\n\n// Next returns the next log 
record,\nfunc (qr *Result) Next() (*Record, error) {\n\tif qr.err != nil {\n\t\treturn nil, qr.err\n\t}\n\tif len(qr.logs) > 0 {\n\t\tlr := qr.logs[0]\n\t\tqr.logs = qr.logs[1:]\n\t\treturn lr, nil\n\t}\n\n\tif qr.request.Offset == nil && qr.resultsSeen {\n\t\treturn nil, Done\n\t}\n\n\tif err := qr.run(); err != nil {\n\t\t// Errors here may be retried, so don't store the error.\n\t\treturn nil, err\n\t}\n\n\treturn qr.Next()\n}\n\n// Done is returned when a query iteration has completed.\nvar Done = errors.New(\"log: query has no more results\")\n\n// protoToAppLogs takes as input an array of pointers to LogLines, the internal\n// Protocol Buffer representation of a single application-level log,\n// and converts it to an array of AppLogs, the external representation\n// of an application-level log.\nfunc protoToAppLogs(logLines []*pb.LogLine) []AppLog {\n\tappLogs := make([]AppLog, len(logLines))\n\n\tfor i, line := range logLines {\n\t\tappLogs[i] = AppLog{\n\t\t\tTime:    time.Unix(0, *line.Time*1e3),\n\t\t\tLevel:   int(*line.Level),\n\t\t\tMessage: *line.LogMessage,\n\t\t}\n\t}\n\n\treturn appLogs\n}\n\n// protoToRecord converts a RequestLog, the internal Protocol Buffer\n// representation of a single request-level log, to a Record, its\n// corresponding external representation.\nfunc protoToRecord(rl *pb.RequestLog) *Record {\n\toffset, err := proto.Marshal(rl.Offset)\n\tif err != nil {\n\t\toffset = nil\n\t}\n\treturn &Record{\n\t\tAppID:             *rl.AppId,\n\t\tModuleID:          rl.GetModuleId(),\n\t\tVersionID:         *rl.VersionId,\n\t\tRequestID:         rl.RequestId,\n\t\tOffset:            offset,\n\t\tIP:                *rl.Ip,\n\t\tNickname:          rl.GetNickname(),\n\t\tAppEngineRelease:  string(rl.GetAppEngineRelease()),\n\t\tStartTime:         time.Unix(0, *rl.StartTime*1e3),\n\t\tEndTime:           time.Unix(0, *rl.EndTime*1e3),\n\t\tLatency:           time.Duration(*rl.Latency) * time.Microsecond,\n\t\tMCycles:           
*rl.Mcycles,\n\t\tMethod:            *rl.Method,\n\t\tResource:          *rl.Resource,\n\t\tHTTPVersion:       *rl.HttpVersion,\n\t\tStatus:            *rl.Status,\n\t\tResponseSize:      *rl.ResponseSize,\n\t\tReferrer:          rl.GetReferrer(),\n\t\tUserAgent:         rl.GetUserAgent(),\n\t\tURLMapEntry:       *rl.UrlMapEntry,\n\t\tCombined:          *rl.Combined,\n\t\tHost:              rl.GetHost(),\n\t\tCost:              rl.GetCost(),\n\t\tTaskQueueName:     rl.GetTaskQueueName(),\n\t\tTaskName:          rl.GetTaskName(),\n\t\tWasLoadingRequest: rl.GetWasLoadingRequest(),\n\t\tPendingTime:       time.Duration(rl.GetPendingTime()) * time.Microsecond,\n\t\tFinished:          rl.GetFinished(),\n\t\tAppLogs:           protoToAppLogs(rl.Line),\n\t\tInstanceID:        string(rl.GetCloneKey()),\n\t}\n}\n\n// Run starts a query for log records, which contain request and application\n// level log information.\nfunc (params *Query) Run(c context.Context) *Result {\n\treq, err := makeRequest(params, internal.FullyQualifiedAppID(c), appengine.VersionID(c))\n\treturn &Result{\n\t\tcontext: c,\n\t\trequest: req,\n\t\terr:     err,\n\t}\n}\n\nfunc makeRequest(params *Query, appID, versionID string) (*pb.LogReadRequest, error) {\n\treq := &pb.LogReadRequest{}\n\treq.AppId = &appID\n\tif !params.StartTime.IsZero() {\n\t\treq.StartTime = proto.Int64(params.StartTime.UnixNano() / 1e3)\n\t}\n\tif !params.EndTime.IsZero() {\n\t\treq.EndTime = proto.Int64(params.EndTime.UnixNano() / 1e3)\n\t}\n\tif len(params.Offset) > 0 {\n\t\tvar offset pb.LogOffset\n\t\tif err := proto.Unmarshal(params.Offset, &offset); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"bad Offset: %v\", err)\n\t\t}\n\t\treq.Offset = &offset\n\t}\n\tif params.Incomplete {\n\t\treq.IncludeIncomplete = &params.Incomplete\n\t}\n\tif params.AppLogs {\n\t\treq.IncludeAppLogs = &params.AppLogs\n\t}\n\tif params.ApplyMinLevel {\n\t\treq.MinimumLogLevel = proto.Int32(int32(params.MinLevel))\n\t}\n\tif params.Versions == nil 
{\n\t\t// If no versions were specified, default to the default module at\n\t\t// the major version being used by this module.\n\t\tif i := strings.Index(versionID, \".\"); i >= 0 {\n\t\t\tversionID = versionID[:i]\n\t\t}\n\t\treq.VersionId = []string{versionID}\n\t} else {\n\t\treq.ModuleVersion = make([]*pb.LogModuleVersion, 0, len(params.Versions))\n\t\tfor _, v := range params.Versions {\n\t\t\tvar m *string\n\t\t\tif i := strings.Index(v, \":\"); i >= 0 {\n\t\t\t\tm, v = proto.String(v[:i]), v[i+1:]\n\t\t\t}\n\t\t\treq.ModuleVersion = append(req.ModuleVersion, &pb.LogModuleVersion{\n\t\t\t\tModuleId:  m,\n\t\t\t\tVersionId: proto.String(v),\n\t\t\t})\n\t\t}\n\t}\n\tif params.RequestIDs != nil {\n\t\tids := make([][]byte, len(params.RequestIDs))\n\t\tfor i, v := range params.RequestIDs {\n\t\t\tids[i] = []byte(v)\n\t\t}\n\t\treq.RequestId = ids\n\t}\n\n\treturn req, nil\n}\n\n// run takes the query Result produced by a call to Run and updates it with\n// more Records. The updated Result contains a new set of logs as well as an\n// offset to where more logs can be found. We also convert the items in the\n// response from their internal representations to external versions of the\n// same structs.\nfunc (r *Result) run() error {\n\tres := &pb.LogReadResponse{}\n\tif err := internal.Call(r.context, \"logservice\", \"Read\", r.request, res); err != nil {\n\t\treturn err\n\t}\n\n\tr.logs = make([]*Record, len(res.Log))\n\tr.request.Offset = res.Offset\n\tr.resultsSeen = true\n\n\tfor i, log := range res.Log {\n\t\tr.logs[i] = protoToRecord(log)\n\t}\n\n\treturn nil\n}\n\nfunc init() {\n\tinternal.RegisterErrorCodeMap(\"logservice\", pb.LogServiceError_ErrorCode_name)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/mail/mail.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n/*\nPackage mail provides the means of sending email from an\nApp Engine application.\n\nExample:\n\tmsg := &mail.Message{\n\t\tSender:  \"romeo@montague.com\",\n\t\tTo:      []string{\"Juliet <juliet@capulet.org>\"},\n\t\tSubject: \"See you tonight\",\n\t\tBody:    \"Don't forget our plans. Hark, 'til later.\",\n\t}\n\tif err := mail.Send(c, msg); err != nil {\n\t\tlog.Errorf(c, \"Alas, my user, the email failed to sendeth: %v\", err)\n\t}\n*/\npackage mail\n\nimport (\n\t\"net/mail\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n\tbpb \"google.golang.org/appengine/internal/base\"\n\tpb \"google.golang.org/appengine/internal/mail\"\n)\n\n// A Message represents an email message.\n// Addresses may be of any form permitted by RFC 822.\ntype Message struct {\n\t// Sender must be set, and must be either an application admin\n\t// or the currently signed-in user.\n\tSender  string\n\tReplyTo string // may be empty\n\n\t// At least one of these slices must have a non-zero length,\n\t// except when calling SendToAdmins.\n\tTo, Cc, Bcc []string\n\n\tSubject string\n\n\t// At least one of Body or HTMLBody must be non-empty.\n\tBody     string\n\tHTMLBody string\n\n\tAttachments []Attachment\n\n\t// Extra mail headers.\n\t// See https://cloud.google.com/appengine/docs/go/mail/\n\t// for permissible headers.\n\tHeaders mail.Header\n}\n\n// An Attachment represents an email attachment.\ntype Attachment struct {\n\t// Name must be set to a valid file name.\n\tName      string\n\tData      []byte\n\tContentID string\n}\n\n// Send sends an email message.\nfunc Send(c context.Context, msg *Message) error {\n\treturn send(c, \"Send\", msg)\n}\n\n// SendToAdmins sends an email message to the application's administrators.\nfunc 
SendToAdmins(c context.Context, msg *Message) error {\n\treturn send(c, \"SendToAdmins\", msg)\n}\n\nfunc send(c context.Context, method string, msg *Message) error {\n\treq := &pb.MailMessage{\n\t\tSender:  &msg.Sender,\n\t\tTo:      msg.To,\n\t\tCc:      msg.Cc,\n\t\tBcc:     msg.Bcc,\n\t\tSubject: &msg.Subject,\n\t}\n\tif msg.ReplyTo != \"\" {\n\t\treq.ReplyTo = &msg.ReplyTo\n\t}\n\tif msg.Body != \"\" {\n\t\treq.TextBody = &msg.Body\n\t}\n\tif msg.HTMLBody != \"\" {\n\t\treq.HtmlBody = &msg.HTMLBody\n\t}\n\tif len(msg.Attachments) > 0 {\n\t\treq.Attachment = make([]*pb.MailAttachment, len(msg.Attachments))\n\t\tfor i, att := range msg.Attachments {\n\t\t\treq.Attachment[i] = &pb.MailAttachment{\n\t\t\t\tFileName: proto.String(att.Name),\n\t\t\t\tData:     att.Data,\n\t\t\t}\n\t\t\tif att.ContentID != \"\" {\n\t\t\t\treq.Attachment[i].ContentID = proto.String(att.ContentID)\n\t\t\t}\n\t\t}\n\t}\n\tfor key, vs := range msg.Headers {\n\t\tfor _, v := range vs {\n\t\t\treq.Header = append(req.Header, &pb.MailHeader{\n\t\t\t\tName:  proto.String(key),\n\t\t\t\tValue: proto.String(v),\n\t\t\t})\n\t\t}\n\t}\n\tres := &bpb.VoidProto{}\n\tif err := internal.Call(c, \"mail\", method, req, res); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tinternal.RegisterErrorCodeMap(\"mail\", pb.MailServiceError_ErrorCode_name)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/memcache/memcache.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// Package memcache provides a client for App Engine's distributed in-memory\n// key-value store for small chunks of arbitrary data.\n//\n// The fundamental operations get and set items, keyed by a string.\n//\n//\titem0, err := memcache.Get(c, \"key\")\n//\tif err != nil && err != memcache.ErrCacheMiss {\n//\t\treturn err\n//\t}\n//\tif err == nil {\n//\t\tfmt.Fprintf(w, \"memcache hit: Key=%q Val=[% x]\\n\", item0.Key, item0.Value)\n//\t} else {\n//\t\tfmt.Fprintf(w, \"memcache miss\\n\")\n//\t}\n//\n// and\n//\n//\titem1 := &memcache.Item{\n//\t\tKey:   \"foo\",\n//\t\tValue: []byte(\"bar\"),\n//\t}\n//\tif err := memcache.Set(c, item1); err != nil {\n//\t\treturn err\n//\t}\npackage memcache\n\nimport (\n\t\"bytes\"\n\t\"encoding/gob\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/memcache\"\n)\n\nvar (\n\t// ErrCacheMiss means that an operation failed\n\t// because the item wasn't present.\n\tErrCacheMiss = errors.New(\"memcache: cache miss\")\n\t// ErrCASConflict means that a CompareAndSwap call failed due to the\n\t// cached value being modified between the Get and the CompareAndSwap.\n\t// If the cached value was simply evicted rather than replaced,\n\t// ErrNotStored will be returned instead.\n\tErrCASConflict = errors.New(\"memcache: compare-and-swap conflict\")\n\t// ErrNoStats means that no statistics were available.\n\tErrNoStats = errors.New(\"memcache: no statistics available\")\n\t// ErrNotStored means that a conditional write operation (i.e. 
Add or\n\t// CompareAndSwap) failed because the condition was not satisfied.\n\tErrNotStored = errors.New(\"memcache: item not stored\")\n\t// ErrServerError means that a server error occurred.\n\tErrServerError = errors.New(\"memcache: server error\")\n)\n\n// Item is the unit of memcache gets and sets.\ntype Item struct {\n\t// Key is the Item's key (250 bytes maximum).\n\tKey string\n\t// Value is the Item's value.\n\tValue []byte\n\t// Object is the Item's value for use with a Codec.\n\tObject interface{}\n\t// Flags are server-opaque flags whose semantics are entirely up to the\n\t// App Engine app.\n\tFlags uint32\n\t// Expiration is the maximum duration that the item will stay\n\t// in the cache.\n\t// The zero value means the Item has no expiration time.\n\t// Subsecond precision is ignored.\n\t// This is not set when getting items.\n\tExpiration time.Duration\n\t// casID is a client-opaque value used for compare-and-swap operations.\n\t// Zero means that compare-and-swap is not used.\n\tcasID uint64\n}\n\nconst (\n\tsecondsIn30Years = 60 * 60 * 24 * 365 * 30 // from memcache server code\n\tthirtyYears      = time.Duration(secondsIn30Years) * time.Second\n)\n\n// protoToItem converts a protocol buffer item to a Go struct.\nfunc protoToItem(p *pb.MemcacheGetResponse_Item) *Item {\n\treturn &Item{\n\t\tKey:   string(p.Key),\n\t\tValue: p.Value,\n\t\tFlags: p.GetFlags(),\n\t\tcasID: p.GetCasId(),\n\t}\n}\n\n// If err is an appengine.MultiError, return its first element. Otherwise, return err.\nfunc singleError(err error) error {\n\tif me, ok := err.(appengine.MultiError); ok {\n\t\treturn me[0]\n\t}\n\treturn err\n}\n\n// Get gets the item for the given key. ErrCacheMiss is returned for a memcache\n// cache miss. 
The key must be at most 250 bytes in length.\nfunc Get(c context.Context, key string) (*Item, error) {\n\tm, err := GetMulti(c, []string{key})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif _, ok := m[key]; !ok {\n\t\treturn nil, ErrCacheMiss\n\t}\n\treturn m[key], nil\n}\n\n// GetMulti is a batch version of Get. The returned map from keys to items may\n// have fewer elements than the input slice, due to memcache cache misses.\n// Each key must be at most 250 bytes in length.\nfunc GetMulti(c context.Context, key []string) (map[string]*Item, error) {\n\tif len(key) == 0 {\n\t\treturn nil, nil\n\t}\n\tkeyAsBytes := make([][]byte, len(key))\n\tfor i, k := range key {\n\t\tkeyAsBytes[i] = []byte(k)\n\t}\n\treq := &pb.MemcacheGetRequest{\n\t\tKey:    keyAsBytes,\n\t\tForCas: proto.Bool(true),\n\t}\n\tres := &pb.MemcacheGetResponse{}\n\tif err := internal.Call(c, \"memcache\", \"Get\", req, res); err != nil {\n\t\treturn nil, err\n\t}\n\tm := make(map[string]*Item, len(res.Item))\n\tfor _, p := range res.Item {\n\t\tt := protoToItem(p)\n\t\tm[t.Key] = t\n\t}\n\treturn m, nil\n}\n\n// Delete deletes the item for the given key.\n// ErrCacheMiss is returned if the specified item can not be found.\n// The key must be at most 250 bytes in length.\nfunc Delete(c context.Context, key string) error {\n\treturn singleError(DeleteMulti(c, []string{key}))\n}\n\n// DeleteMulti is a batch version of Delete.\n// If any keys cannot be found, an appengine.MultiError is returned.\n// Each key must be at most 250 bytes in length.\nfunc DeleteMulti(c context.Context, key []string) error {\n\tif len(key) == 0 {\n\t\treturn nil\n\t}\n\treq := &pb.MemcacheDeleteRequest{\n\t\tItem: make([]*pb.MemcacheDeleteRequest_Item, len(key)),\n\t}\n\tfor i, k := range key {\n\t\treq.Item[i] = &pb.MemcacheDeleteRequest_Item{Key: []byte(k)}\n\t}\n\tres := &pb.MemcacheDeleteResponse{}\n\tif err := internal.Call(c, \"memcache\", \"Delete\", req, res); err != nil {\n\t\treturn err\n\t}\n\tif 
len(res.DeleteStatus) != len(key) {\n\t\treturn ErrServerError\n\t}\n\tme, any := make(appengine.MultiError, len(key)), false\n\tfor i, s := range res.DeleteStatus {\n\t\tswitch s {\n\t\tcase pb.MemcacheDeleteResponse_DELETED:\n\t\t\t// OK\n\t\tcase pb.MemcacheDeleteResponse_NOT_FOUND:\n\t\t\tme[i] = ErrCacheMiss\n\t\t\tany = true\n\t\tdefault:\n\t\t\tme[i] = ErrServerError\n\t\t\tany = true\n\t\t}\n\t}\n\tif any {\n\t\treturn me\n\t}\n\treturn nil\n}\n\n// Increment atomically increments the decimal value in the given key\n// by delta and returns the new value. The value must fit in a uint64.\n// Overflow wraps around, and underflow is capped to zero. The\n// provided delta may be negative. If the key doesn't exist in\n// memcache, the provided initial value is used to atomically\n// populate it before the delta is applied.\n// The key must be at most 250 bytes in length.\nfunc Increment(c context.Context, key string, delta int64, initialValue uint64) (newValue uint64, err error) {\n\treturn incr(c, key, delta, &initialValue)\n}\n\n// IncrementExisting works like Increment but assumes that the key\n// already exists in memcache and doesn't take an initial value.\n// IncrementExisting can save work if calculating the initial value is\n// expensive.\n// An error is returned if the specified item can not be found.\nfunc IncrementExisting(c context.Context, key string, delta int64) (newValue uint64, err error) {\n\treturn incr(c, key, delta, nil)\n}\n\nfunc incr(c context.Context, key string, delta int64, initialValue *uint64) (newValue uint64, err error) {\n\treq := &pb.MemcacheIncrementRequest{\n\t\tKey:          []byte(key),\n\t\tInitialValue: initialValue,\n\t}\n\tif delta >= 0 {\n\t\treq.Delta = proto.Uint64(uint64(delta))\n\t} else {\n\t\treq.Delta = proto.Uint64(uint64(-delta))\n\t\treq.Direction = pb.MemcacheIncrementRequest_DECREMENT.Enum()\n\t}\n\tres := &pb.MemcacheIncrementResponse{}\n\terr = internal.Call(c, \"memcache\", \"Increment\", req, res)\n\tif 
err != nil {\n\t\treturn\n\t}\n\tif res.NewValue == nil {\n\t\treturn 0, ErrCacheMiss\n\t}\n\treturn *res.NewValue, nil\n}\n\n// set sets the given items using the given conflict resolution policy.\n// appengine.MultiError may be returned.\nfunc set(c context.Context, item []*Item, value [][]byte, policy pb.MemcacheSetRequest_SetPolicy) error {\n\tif len(item) == 0 {\n\t\treturn nil\n\t}\n\treq := &pb.MemcacheSetRequest{\n\t\tItem: make([]*pb.MemcacheSetRequest_Item, len(item)),\n\t}\n\tfor i, t := range item {\n\t\tp := &pb.MemcacheSetRequest_Item{\n\t\t\tKey: []byte(t.Key),\n\t\t}\n\t\tif value == nil {\n\t\t\tp.Value = t.Value\n\t\t} else {\n\t\t\tp.Value = value[i]\n\t\t}\n\t\tif t.Flags != 0 {\n\t\t\tp.Flags = proto.Uint32(t.Flags)\n\t\t}\n\t\tif t.Expiration != 0 {\n\t\t\t// In the .proto file, MemcacheSetRequest_Item uses a fixed32 (i.e. unsigned)\n\t\t\t// for expiration time, while MemcacheGetRequest_Item uses int32 (i.e. signed).\n\t\t\t// Throughout this .go file, we use int32.\n\t\t\t// Also, in the proto, the expiration value is either a duration (in seconds)\n\t\t\t// or an absolute Unix timestamp (in seconds), depending on whether the\n\t\t\t// value is less than or greater than or equal to 30 years, respectively.\n\t\t\tif t.Expiration < time.Second {\n\t\t\t\t// Because an Expiration of 0 means no expiration, we take\n\t\t\t\t// care here to translate an item with an expiration\n\t\t\t\t// Duration between 0-1 seconds as immediately expiring\n\t\t\t\t// (saying it expired a few seconds ago), rather than\n\t\t\t\t// rounding it down to 0 and making it live forever.\n\t\t\t\tp.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) - 5)\n\t\t\t} else if t.Expiration >= thirtyYears {\n\t\t\t\tp.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) + uint32(t.Expiration/time.Second))\n\t\t\t} else {\n\t\t\t\tp.ExpirationTime = proto.Uint32(uint32(t.Expiration / time.Second))\n\t\t\t}\n\t\t}\n\t\tif t.casID != 0 {\n\t\t\tp.CasId = 
proto.Uint64(t.casID)\n\t\t\tp.ForCas = proto.Bool(true)\n\t\t}\n\t\tp.SetPolicy = policy.Enum()\n\t\treq.Item[i] = p\n\t}\n\tres := &pb.MemcacheSetResponse{}\n\tif err := internal.Call(c, \"memcache\", \"Set\", req, res); err != nil {\n\t\treturn err\n\t}\n\tif len(res.SetStatus) != len(item) {\n\t\treturn ErrServerError\n\t}\n\tme, any := make(appengine.MultiError, len(item)), false\n\tfor i, st := range res.SetStatus {\n\t\tvar err error\n\t\tswitch st {\n\t\tcase pb.MemcacheSetResponse_STORED:\n\t\t\t// OK\n\t\tcase pb.MemcacheSetResponse_NOT_STORED:\n\t\t\terr = ErrNotStored\n\t\tcase pb.MemcacheSetResponse_EXISTS:\n\t\t\terr = ErrCASConflict\n\t\tdefault:\n\t\t\terr = ErrServerError\n\t\t}\n\t\tif err != nil {\n\t\t\tme[i] = err\n\t\t\tany = true\n\t\t}\n\t}\n\tif any {\n\t\treturn me\n\t}\n\treturn nil\n}\n\n// Set writes the given item, unconditionally.\nfunc Set(c context.Context, item *Item) error {\n\treturn singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_SET))\n}\n\n// SetMulti is a batch version of Set.\n// appengine.MultiError may be returned.\nfunc SetMulti(c context.Context, item []*Item) error {\n\treturn set(c, item, nil, pb.MemcacheSetRequest_SET)\n}\n\n// Add writes the given item, if no value already exists for its key.\n// ErrNotStored is returned if that condition is not met.\nfunc Add(c context.Context, item *Item) error {\n\treturn singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_ADD))\n}\n\n// AddMulti is a batch version of Add.\n// appengine.MultiError may be returned.\nfunc AddMulti(c context.Context, item []*Item) error {\n\treturn set(c, item, nil, pb.MemcacheSetRequest_ADD)\n}\n\n// CompareAndSwap writes the given item that was previously returned by Get,\n// if the value was neither modified or evicted between the Get and the\n// CompareAndSwap calls. 
The item's Key should not change between calls but\n// all other item fields may differ.\n// ErrCASConflict is returned if the value was modified in between the calls.\n// ErrNotStored is returned if the value was evicted in between the calls.\nfunc CompareAndSwap(c context.Context, item *Item) error {\n\treturn singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_CAS))\n}\n\n// CompareAndSwapMulti is a batch version of CompareAndSwap.\n// appengine.MultiError may be returned.\nfunc CompareAndSwapMulti(c context.Context, item []*Item) error {\n\treturn set(c, item, nil, pb.MemcacheSetRequest_CAS)\n}\n\n// Codec represents a symmetric pair of functions that implement a codec.\n// Items stored into or retrieved from memcache using a Codec have their\n// values marshaled or unmarshaled.\n//\n// All the methods provided for Codec behave analogously to the package level\n// function with same name.\ntype Codec struct {\n\tMarshal   func(interface{}) ([]byte, error)\n\tUnmarshal func([]byte, interface{}) error\n}\n\n// Get gets the item for the given key and decodes the obtained value into v.\n// ErrCacheMiss is returned for a memcache cache miss.\n// The key must be at most 250 bytes in length.\nfunc (cd Codec) Get(c context.Context, key string, v interface{}) (*Item, error) {\n\ti, err := Get(c, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := cd.Unmarshal(i.Value, v); err != nil {\n\t\treturn nil, err\n\t}\n\treturn i, nil\n}\n\nfunc (cd Codec) set(c context.Context, items []*Item, policy pb.MemcacheSetRequest_SetPolicy) error {\n\tvar vs [][]byte\n\tvar me appengine.MultiError\n\tfor i, item := range items {\n\t\tv, err := cd.Marshal(item.Object)\n\t\tif err != nil {\n\t\t\tif me == nil {\n\t\t\t\tme = make(appengine.MultiError, len(items))\n\t\t\t}\n\t\t\tme[i] = err\n\t\t\tcontinue\n\t\t}\n\t\tif me == nil {\n\t\t\tvs = append(vs, v)\n\t\t}\n\t}\n\tif me != nil {\n\t\treturn me\n\t}\n\n\treturn set(c, items, vs, policy)\n}\n\n// Set writes the 
given item, unconditionally.\nfunc (cd Codec) Set(c context.Context, item *Item) error {\n\treturn singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_SET))\n}\n\n// SetMulti is a batch version of Set.\n// appengine.MultiError may be returned.\nfunc (cd Codec) SetMulti(c context.Context, items []*Item) error {\n\treturn cd.set(c, items, pb.MemcacheSetRequest_SET)\n}\n\n// Add writes the given item, if no value already exists for its key.\n// ErrNotStored is returned if that condition is not met.\nfunc (cd Codec) Add(c context.Context, item *Item) error {\n\treturn singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_ADD))\n}\n\n// AddMulti is a batch version of Add.\n// appengine.MultiError may be returned.\nfunc (cd Codec) AddMulti(c context.Context, items []*Item) error {\n\treturn cd.set(c, items, pb.MemcacheSetRequest_ADD)\n}\n\n// CompareAndSwap writes the given item that was previously returned by Get,\n// if the value was neither modified or evicted between the Get and the\n// CompareAndSwap calls. 
The item's Key should not change between calls but\n// all other item fields may differ.\n// ErrCASConflict is returned if the value was modified in between the calls.\n// ErrNotStored is returned if the value was evicted in between the calls.\nfunc (cd Codec) CompareAndSwap(c context.Context, item *Item) error {\n\treturn singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_CAS))\n}\n\n// CompareAndSwapMulti is a batch version of CompareAndSwap.\n// appengine.MultiError may be returned.\nfunc (cd Codec) CompareAndSwapMulti(c context.Context, items []*Item) error {\n\treturn cd.set(c, items, pb.MemcacheSetRequest_CAS)\n}\n\nvar (\n\t// Gob is a Codec that uses the gob package.\n\tGob = Codec{gobMarshal, gobUnmarshal}\n\t// JSON is a Codec that uses the json package.\n\tJSON = Codec{json.Marshal, json.Unmarshal}\n)\n\nfunc gobMarshal(v interface{}) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tif err := gob.NewEncoder(&buf).Encode(v); err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}\n\nfunc gobUnmarshal(data []byte, v interface{}) error {\n\treturn gob.NewDecoder(bytes.NewBuffer(data)).Decode(v)\n}\n\n// Statistics represents a set of statistics about the memcache cache.\n// This may include items that have expired but have not yet been removed from the cache.\ntype Statistics struct {\n\tHits     uint64 // Counter of cache hits\n\tMisses   uint64 // Counter of cache misses\n\tByteHits uint64 // Counter of bytes transferred for gets\n\n\tItems uint64 // Items currently in the cache\n\tBytes uint64 // Size of all items currently in the cache\n\n\tOldest int64 // Age of access of the oldest item, in seconds\n}\n\n// Stats retrieves the current memcache statistics.\nfunc Stats(c context.Context) (*Statistics, error) {\n\treq := &pb.MemcacheStatsRequest{}\n\tres := &pb.MemcacheStatsResponse{}\n\tif err := internal.Call(c, \"memcache\", \"Stats\", req, res); err != nil {\n\t\treturn nil, err\n\t}\n\tif res.Stats == nil {\n\t\treturn nil, 
ErrNoStats\n\t}\n\treturn &Statistics{\n\t\tHits:     *res.Stats.Hits,\n\t\tMisses:   *res.Stats.Misses,\n\t\tByteHits: *res.Stats.ByteHits,\n\t\tItems:    *res.Stats.Items,\n\t\tBytes:    *res.Stats.Bytes,\n\t\tOldest:   int64(*res.Stats.OldestItemAge),\n\t}, nil\n}\n\n// Flush flushes all items from memcache.\nfunc Flush(c context.Context) error {\n\treq := &pb.MemcacheFlushRequest{}\n\tres := &pb.MemcacheFlushResponse{}\n\treturn internal.Call(c, \"memcache\", \"FlushAll\", req, res)\n}\n\nfunc namespaceMod(m proto.Message, namespace string) {\n\tswitch m := m.(type) {\n\tcase *pb.MemcacheDeleteRequest:\n\t\tif m.NameSpace == nil {\n\t\t\tm.NameSpace = &namespace\n\t\t}\n\tcase *pb.MemcacheGetRequest:\n\t\tif m.NameSpace == nil {\n\t\t\tm.NameSpace = &namespace\n\t\t}\n\tcase *pb.MemcacheIncrementRequest:\n\t\tif m.NameSpace == nil {\n\t\t\tm.NameSpace = &namespace\n\t\t}\n\tcase *pb.MemcacheSetRequest:\n\t\tif m.NameSpace == nil {\n\t\t\tm.NameSpace = &namespace\n\t\t}\n\t\t// MemcacheFlushRequest, MemcacheStatsRequest do not apply namespace.\n\t}\n}\n\nfunc init() {\n\tinternal.RegisterErrorCodeMap(\"memcache\", pb.MemcacheServiceError_ErrorCode_name)\n\tinternal.NamespaceMods[\"memcache\"] = namespaceMod\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/module/module.go",
    "content": "// Copyright 2013 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n/*\nPackage module provides functions for interacting with modules.\n\nThe appengine package contains functions that report the identity of the app,\nincluding the module name.\n*/\npackage module\n\nimport (\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/modules\"\n)\n\n// List returns the names of modules belonging to this application.\nfunc List(c context.Context) ([]string, error) {\n\treq := &pb.GetModulesRequest{}\n\tres := &pb.GetModulesResponse{}\n\terr := internal.Call(c, \"modules\", \"GetModules\", req, res)\n\treturn res.Module, err\n}\n\n// NumInstances returns the number of instances of the given module/version.\n// If either argument is the empty string it means the default.\nfunc NumInstances(c context.Context, module, version string) (int, error) {\n\treq := &pb.GetNumInstancesRequest{}\n\tif module != \"\" {\n\t\treq.Module = &module\n\t}\n\tif version != \"\" {\n\t\treq.Version = &version\n\t}\n\tres := &pb.GetNumInstancesResponse{}\n\n\tif err := internal.Call(c, \"modules\", \"GetNumInstances\", req, res); err != nil {\n\t\treturn 0, err\n\t}\n\treturn int(*res.Instances), nil\n}\n\n// SetNumInstances sets the number of instances of the given module.version to the\n// specified value. 
If either module or version are the empty string it means the\n// default.\nfunc SetNumInstances(c context.Context, module, version string, instances int) error {\n\treq := &pb.SetNumInstancesRequest{}\n\tif module != \"\" {\n\t\treq.Module = &module\n\t}\n\tif version != \"\" {\n\t\treq.Version = &version\n\t}\n\treq.Instances = proto.Int64(int64(instances))\n\tres := &pb.SetNumInstancesResponse{}\n\treturn internal.Call(c, \"modules\", \"SetNumInstances\", req, res)\n}\n\n// Versions returns the names of the versions that belong to the specified module.\n// If module is the empty string, it means the default module.\nfunc Versions(c context.Context, module string) ([]string, error) {\n\treq := &pb.GetVersionsRequest{}\n\tif module != \"\" {\n\t\treq.Module = &module\n\t}\n\tres := &pb.GetVersionsResponse{}\n\terr := internal.Call(c, \"modules\", \"GetVersions\", req, res)\n\treturn res.GetVersion(), err\n}\n\n// DefaultVersion returns the default version of the specified module.\n// If module is the empty string, it means the default module.\nfunc DefaultVersion(c context.Context, module string) (string, error) {\n\treq := &pb.GetDefaultVersionRequest{}\n\tif module != \"\" {\n\t\treq.Module = &module\n\t}\n\tres := &pb.GetDefaultVersionResponse{}\n\terr := internal.Call(c, \"modules\", \"GetDefaultVersion\", req, res)\n\treturn res.GetVersion(), err\n}\n\n// Start starts the specified version of the specified module.\n// If either module or version are the empty string, it means the default.\nfunc Start(c context.Context, module, version string) error {\n\treq := &pb.StartModuleRequest{}\n\tif module != \"\" {\n\t\treq.Module = &module\n\t}\n\tif version != \"\" {\n\t\treq.Version = &version\n\t}\n\tres := &pb.StartModuleResponse{}\n\treturn internal.Call(c, \"modules\", \"StartModule\", req, res)\n}\n\n// Stop stops the specified version of the specified module.\n// If either module or version are the empty string, it means the default.\nfunc Stop(c 
context.Context, module, version string) error {\n\treq := &pb.StopModuleRequest{}\n\tif module != \"\" {\n\t\treq.Module = &module\n\t}\n\tif version != \"\" {\n\t\treq.Version = &version\n\t}\n\tres := &pb.StopModuleResponse{}\n\treturn internal.Call(c, \"modules\", \"StopModule\", req, res)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/namespace.go",
    "content": "// Copyright 2012 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage appengine\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n)\n\n// Namespace returns a replacement context that operates within the given namespace.\nfunc Namespace(c context.Context, namespace string) (context.Context, error) {\n\tif !validNamespace.MatchString(namespace) {\n\t\treturn nil, fmt.Errorf(\"appengine: namespace %q does not match /%s/\", namespace, validNamespace)\n\t}\n\treturn internal.NamespacedContext(c, namespace), nil\n}\n\n// validNamespace matches valid namespace names.\nvar validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/remote_api/client.go",
    "content": "// Copyright 2013 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage remote_api\n\n// This file provides the client for connecting remotely to a user's production\n// application.\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"math/rand\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/remote_api\"\n)\n\n// NewRemoteContext returns a context that gives access to the production\n// APIs for the application at the given host. All communication will be\n// performed over SSL unless the host is localhost.\nfunc NewRemoteContext(host string, client *http.Client) (context.Context, error) {\n\t// Add an appcfg header to outgoing requests.\n\tt := client.Transport\n\tif t == nil {\n\t\tt = http.DefaultTransport\n\t}\n\tclient.Transport = &headerAddingRoundTripper{t}\n\n\turl := url.URL{\n\t\tScheme: \"https\",\n\t\tHost:   host,\n\t\tPath:   \"/_ah/remote_api\",\n\t}\n\tif host == \"localhost\" || strings.HasPrefix(host, \"localhost:\") {\n\t\turl.Scheme = \"http\"\n\t}\n\tu := url.String()\n\tappID, err := getAppID(client, u)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to contact server: %v\", err)\n\t}\n\trc := &remoteContext{\n\t\tclient: client,\n\t\turl:    u,\n\t}\n\tctx := internal.WithCallOverride(context.Background(), rc.call)\n\tctx = internal.WithLogOverride(ctx, rc.logf)\n\tctx = internal.WithAppIDOverride(ctx, appID)\n\treturn ctx, nil\n}\n\ntype remoteContext struct {\n\tclient *http.Client\n\turl    string\n}\n\nvar logLevels = map[int64]string{\n\t0: \"DEBUG\",\n\t1: \"INFO\",\n\t2: \"WARNING\",\n\t3: \"ERROR\",\n\t4: \"CRITICAL\",\n}\n\nfunc (c *remoteContext) logf(level int64, format string, args 
...interface{}) {\n\tlog.Printf(logLevels[level]+\": \"+format, args...)\n}\n\nfunc (c *remoteContext) call(ctx context.Context, service, method string, in, out proto.Message) error {\n\treq, err := proto.Marshal(in)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error marshalling request: %v\", err)\n\t}\n\n\tremReq := &pb.Request{\n\t\tServiceName: proto.String(service),\n\t\tMethod:      proto.String(method),\n\t\tRequest:     req,\n\t\t// NOTE(djd): RequestId is unused in the server.\n\t}\n\n\treq, err = proto.Marshal(remReq)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"proto.Marshal: %v\", err)\n\t}\n\n\t// TODO(djd): Respect ctx.Deadline()?\n\tresp, err := c.client.Post(c.url, \"application/octet-stream\", bytes.NewReader(req))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error sending request: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"bad response %d; body: %q\", resp.StatusCode, body)\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed reading response: %v\", err)\n\t}\n\tremResp := &pb.Response{}\n\tif err := proto.Unmarshal(body, remResp); err != nil {\n\t\treturn fmt.Errorf(\"error unmarshalling response: %v\", err)\n\t}\n\n\tif ae := remResp.GetApplicationError(); ae != nil {\n\t\treturn &internal.APIError{\n\t\t\tCode:    ae.GetCode(),\n\t\t\tDetail:  ae.GetDetail(),\n\t\t\tService: service,\n\t\t}\n\t}\n\n\tif remResp.Response == nil {\n\t\treturn fmt.Errorf(\"unexpected response: %s\", proto.MarshalTextString(remResp))\n\t}\n\n\treturn proto.Unmarshal(remResp.Response, out)\n}\n\n// This is a forgiving regexp designed to parse the app ID from YAML.\nvar appIDRE = regexp.MustCompile(`app_id[\"']?\\s*:\\s*['\"]?([-a-z0-9.:~]+)`)\n\nfunc getAppID(client *http.Client, url string) (string, error) {\n\t// Generate a pseudo-random token for handshaking.\n\ttoken := strconv.Itoa(rand.New(rand.NewSource(time.Now().UnixNano())).Int())\n\n\tresp, err := 
client.Get(fmt.Sprintf(\"%s?rtok=%s\", url, token))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn \"\", fmt.Errorf(\"bad response %d; body: %q\", resp.StatusCode, body)\n\t}\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed reading response: %v\", err)\n\t}\n\n\t// Check the token is present in response.\n\tif !bytes.Contains(body, []byte(token)) {\n\t\treturn \"\", fmt.Errorf(\"token not found: want %q; body %q\", token, body)\n\t}\n\n\tmatch := appIDRE.FindSubmatch(body)\n\tif match == nil {\n\t\treturn \"\", fmt.Errorf(\"app ID not found: body %q\", body)\n\t}\n\n\treturn string(match[1]), nil\n}\n\ntype headerAddingRoundTripper struct {\n\tWrapped http.RoundTripper\n}\n\nfunc (t *headerAddingRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {\n\tr.Header.Set(\"X-Appcfg-Api-Version\", \"1\")\n\treturn t.Wrapped.RoundTrip(r)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/remote_api/remote_api.go",
    "content": "// Copyright 2012 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n/*\nPackage remote_api implements the /_ah/remote_api endpoint.\nThis endpoint is used by offline tools such as the bulk loader.\n*/\npackage remote_api\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"strconv\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/remote_api\"\n\t\"google.golang.org/appengine/log\"\n\t\"google.golang.org/appengine/user\"\n)\n\nfunc init() {\n\thttp.HandleFunc(\"/_ah/remote_api\", handle)\n}\n\nfunc handle(w http.ResponseWriter, req *http.Request) {\n\tc := appengine.NewContext(req)\n\n\tu := user.Current(c)\n\tif u == nil {\n\t\tu, _ = user.CurrentOAuth(c,\n\t\t\t\"https://www.googleapis.com/auth/cloud-platform\",\n\t\t\t\"https://www.googleapis.com/auth/appengine.apis\",\n\t\t)\n\t}\n\n\tif u == nil || !u.Admin {\n\t\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tio.WriteString(w, \"You must be logged in as an administrator to access this.\\n\")\n\t\treturn\n\t}\n\tif req.Header.Get(\"X-Appcfg-Api-Version\") == \"\" {\n\t\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\tio.WriteString(w, \"This request did not contain a necessary header.\\n\")\n\t\treturn\n\t}\n\n\tif req.Method != \"POST\" {\n\t\t// Response must be YAML.\n\t\trtok := req.FormValue(\"rtok\")\n\t\tif rtok == \"\" {\n\t\t\trtok = \"0\"\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"text/yaml; charset=utf-8\")\n\t\tfmt.Fprintf(w, `{app_id: %q, rtok: %q}`, internal.FullyQualifiedAppID(c), rtok)\n\t\treturn\n\t}\n\n\tdefer req.Body.Close()\n\tbody, err := ioutil.ReadAll(req.Body)\n\tif err != nil 
{\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tlog.Errorf(c, \"Failed reading body: %v\", err)\n\t\treturn\n\t}\n\tremReq := &pb.Request{}\n\tif err := proto.Unmarshal(body, remReq); err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tlog.Errorf(c, \"Bad body: %v\", err)\n\t\treturn\n\t}\n\n\tservice, method := *remReq.ServiceName, *remReq.Method\n\tif !requestSupported(service, method) {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tlog.Errorf(c, \"Unsupported RPC /%s.%s\", service, method)\n\t\treturn\n\t}\n\n\trawReq := &rawMessage{remReq.Request}\n\trawRes := &rawMessage{}\n\terr = internal.Call(c, service, method, rawReq, rawRes)\n\n\tremRes := &pb.Response{}\n\tif err == nil {\n\t\tremRes.Response = rawRes.buf\n\t} else if ae, ok := err.(*internal.APIError); ok {\n\t\tremRes.ApplicationError = &pb.ApplicationError{\n\t\t\tCode:   &ae.Code,\n\t\t\tDetail: &ae.Detail,\n\t\t}\n\t} else {\n\t\t// This shouldn't normally happen.\n\t\tlog.Errorf(c, \"appengine/remote_api: Unexpected error of type %T: %v\", err, err)\n\t\tremRes.ApplicationError = &pb.ApplicationError{\n\t\t\tCode:   proto.Int32(0),\n\t\t\tDetail: proto.String(err.Error()),\n\t\t}\n\t}\n\tout, err := proto.Marshal(remRes)\n\tif err != nil {\n\t\t// This should not be possible.\n\t\tw.WriteHeader(500)\n\t\tlog.Errorf(c, \"proto.Marshal: %v\", err)\n\t\treturn\n\t}\n\n\tlog.Infof(c, \"Spooling %d bytes of response to /%s.%s\", len(out), service, method)\n\tw.Header().Set(\"Content-Type\", \"application/octet-stream\")\n\tw.Header().Set(\"Content-Length\", strconv.Itoa(len(out)))\n\tw.Write(out)\n}\n\n// rawMessage is a protocol buffer type that is already serialised.\n// This allows the remote_api code here to handle messages\n// without having to know the real type.\ntype rawMessage struct {\n\tbuf []byte\n}\n\nfunc (rm *rawMessage) Marshal() ([]byte, error) {\n\treturn rm.buf, nil\n}\n\nfunc (rm *rawMessage) Unmarshal(buf []byte) error {\n\trm.buf = make([]byte, len(buf))\n\tcopy(rm.buf, 
buf)\n\treturn nil\n}\n\nfunc requestSupported(service, method string) bool {\n\t// This list of supported services is taken from SERVICE_PB_MAP in remote_api_services.py\n\tswitch service {\n\tcase \"app_identity_service\", \"blobstore\", \"capability_service\", \"channel\", \"datastore_v3\",\n\t\t\"datastore_v4\", \"file\", \"images\", \"logservice\", \"mail\", \"matcher\", \"memcache\", \"remote_datastore\",\n\t\t\"remote_socket\", \"search\", \"modules\", \"system\", \"taskqueue\", \"urlfetch\", \"user\", \"xmpp\":\n\t\treturn true\n\t}\n\treturn false\n}\n\n// Methods to satisfy proto.Message.\nfunc (rm *rawMessage) Reset()         { rm.buf = nil }\nfunc (rm *rawMessage) String() string { return strconv.Quote(string(rm.buf)) }\nfunc (*rawMessage) ProtoMessage()     {}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/runtime/runtime.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n/*\nPackage runtime exposes information about the resource usage of the application.\nIt also provides a way to run code in a new background context of a module.\n\nThis package does not work on Managed VMs.\n*/\npackage runtime\n\nimport (\n\t\"net/http\"\n\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/system\"\n)\n\n// Statistics represents the system's statistics.\ntype Statistics struct {\n\t// CPU records the CPU consumed by this instance, in megacycles.\n\tCPU struct {\n\t\tTotal   float64\n\t\tRate1M  float64 // consumption rate over one minute\n\t\tRate10M float64 // consumption rate over ten minutes\n\t}\n\t// RAM records the memory used by the instance, in megabytes.\n\tRAM struct {\n\t\tCurrent    float64\n\t\tAverage1M  float64 // average usage over one minute\n\t\tAverage10M float64 // average usage over ten minutes\n\t}\n}\n\nfunc Stats(c context.Context) (*Statistics, error) {\n\treq := &pb.GetSystemStatsRequest{}\n\tres := &pb.GetSystemStatsResponse{}\n\tif err := internal.Call(c, \"system\", \"GetSystemStats\", req, res); err != nil {\n\t\treturn nil, err\n\t}\n\ts := &Statistics{}\n\tif res.Cpu != nil {\n\t\ts.CPU.Total = res.Cpu.GetTotal()\n\t\ts.CPU.Rate1M = res.Cpu.GetRate1M()\n\t\ts.CPU.Rate10M = res.Cpu.GetRate10M()\n\t}\n\tif res.Memory != nil {\n\t\ts.RAM.Current = res.Memory.GetCurrent()\n\t\ts.RAM.Average1M = res.Memory.GetAverage1M()\n\t\ts.RAM.Average10M = res.Memory.GetAverage10M()\n\t}\n\treturn s, nil\n}\n\n/*\nRunInBackground makes an API call that triggers an /_ah/background request.\n\nThere are two independent code paths that need to make contact:\nthe RunInBackground code, and the /_ah/background handler. 
The matchmaker\nloop arranges for the two paths to meet. The RunInBackground code passes\na send to the matchmaker, the /_ah/background passes a recv to the matchmaker,\nand the matchmaker hooks them up.\n*/\n\nfunc init() {\n\thttp.HandleFunc(\"/_ah/background\", handleBackground)\n\n\tsc := make(chan send)\n\trc := make(chan recv)\n\tsendc, recvc = sc, rc\n\tgo matchmaker(sc, rc)\n}\n\nvar (\n\tsendc chan<- send // RunInBackground sends to this\n\trecvc chan<- recv // handleBackground sends to this\n)\n\ntype send struct {\n\tid string\n\tf  func(context.Context)\n}\n\ntype recv struct {\n\tid string\n\tch chan<- func(context.Context)\n}\n\nfunc matchmaker(sendc <-chan send, recvc <-chan recv) {\n\t// When one side of the match arrives before the other\n\t// it is inserted in the corresponding map.\n\twaitSend := make(map[string]send)\n\twaitRecv := make(map[string]recv)\n\n\tfor {\n\t\tselect {\n\t\tcase s := <-sendc:\n\t\t\tif r, ok := waitRecv[s.id]; ok {\n\t\t\t\t// meet!\n\t\t\t\tdelete(waitRecv, s.id)\n\t\t\t\tr.ch <- s.f\n\t\t\t} else {\n\t\t\t\t// waiting for r\n\t\t\t\twaitSend[s.id] = s\n\t\t\t}\n\t\tcase r := <-recvc:\n\t\t\tif s, ok := waitSend[r.id]; ok {\n\t\t\t\t// meet!\n\t\t\t\tdelete(waitSend, r.id)\n\t\t\t\tr.ch <- s.f\n\t\t\t} else {\n\t\t\t\t// waiting for s\n\t\t\t\twaitRecv[r.id] = r\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar newContext = appengine.NewContext // for testing\n\nfunc handleBackground(w http.ResponseWriter, req *http.Request) {\n\tid := req.Header.Get(\"X-AppEngine-BackgroundRequest\")\n\n\tch := make(chan func(context.Context))\n\trecvc <- recv{id, ch}\n\t(<-ch)(newContext(req))\n}\n\n// RunInBackground runs f in a background goroutine in this process.\n// f is provided a context that may outlast the context provided to RunInBackground.\n// This is only valid to invoke from a manually scaled module.\nfunc RunInBackground(c context.Context, f func(c context.Context)) error {\n\treq := &pb.StartBackgroundRequestRequest{}\n\tres := 
&pb.StartBackgroundRequestResponse{}\n\tif err := internal.Call(c, \"system\", \"StartBackgroundRequest\", req, res); err != nil {\n\t\treturn err\n\t}\n\tsendc <- send{res.GetRequestId(), f}\n\treturn nil\n}\n\nfunc init() {\n\tinternal.RegisterErrorCodeMap(\"system\", pb.SystemServiceError_ErrorCode_name)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/search/doc.go",
    "content": "// Copyright 2015 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n/*\nPackage search provides a client for App Engine's search service.\n\n\nBasic Operations\n\nIndexes contain documents. Each index is identified by its name: a\nhuman-readable ASCII string.\n\nWithin an index, documents are associated with an ID, which is also\na human-readable ASCII string. A document's contents are a mapping from\ncase-sensitive field names to values. Valid types for field values are:\n  - string,\n  - search.Atom,\n  - search.HTML,\n  - time.Time (stored with millisecond precision),\n  - float64 (value between -2,147,483,647 and 2,147,483,647 inclusive),\n  - appengine.GeoPoint.\n\nThe Get and Put methods on an Index load and save a document.\nA document's contents are typically represented by a struct pointer.\n\nExample code:\n\n\ttype Doc struct {\n\t\tAuthor   string\n\t\tComment  string\n\t\tCreation time.Time\n\t}\n\n\tindex, err := search.Open(\"comments\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tnewID, err := index.Put(ctx, \"\", &Doc{\n\t\tAuthor:   \"gopher\",\n\t\tComment:  \"the truth of the matter\",\n\t\tCreation: time.Now(),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\nA single document can be retrieved by its ID. Pass a destination struct\nto Get to hold the resulting document.\n\n\tvar doc Doc\n\terr := index.Get(ctx, id, &doc)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\nSearch and Listing Documents\n\nIndexes have two methods for retrieving multiple documents at once: Search and\nList.\n\nSearching an index for a query will result in an iterator. As with an iterator\nfrom package datastore, pass a destination struct to Next to decode the next\nresult. 
Next will return Done when the iterator is exhausted.\n\n\tfor t := index.Search(ctx, \"Comment:truth\", nil); ; {\n\t\tvar doc Doc\n\t\tid, err := t.Next(&doc)\n\t\tif err == search.Done {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintf(w, \"%s -> %#v\\n\", id, doc)\n\t}\n\nSearch takes a string query to determine which documents to return. The query\ncan be simple, such as a single word to match, or complex. The query\nlanguage is described at\nhttps://cloud.google.com/appengine/docs/go/search/query_strings\n\nSearch also takes an optional SearchOptions struct which gives much more\ncontrol over how results are calculated and returned.\n\nCall List to iterate over all documents in an index.\n\n\tfor t := index.List(ctx, nil); ; {\n\t\tvar doc Doc\n\t\tid, err := t.Next(&doc)\n\t\tif err == search.Done {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintf(w, \"%s -> %#v\\n\", id, doc)\n\t}\n\n\nFields and Facets\n\nA document's contents can be represented by a variety of types. These are\ntypically struct pointers, but they can also be represented by any type\nimplementing the FieldLoadSaver interface. The FieldLoadSaver allows metadata\nto be set for the document with the DocumentMetadata type. Struct pointers are\nmore strongly typed and are easier to use; FieldLoadSavers are more flexible.\n\nA document's contents can be expressed in two ways: fields and facets.\n\nFields are the most common way of providing content for documents. Fields can\nstore data in multiple types and can be matched in searches using query\nstrings.\n\nFacets provide a way to attach categorical information to a document. The only\nvalid types for facets are search.Atom and float64. 
Facets allow search\nresults to contain summaries of the categories matched in a search, and to\nrestrict searches to only match against specific categories.\n\nBy default, for struct pointers, all of the struct fields are used as document\nfields, and the field name used is the same as on the struct (and hence must\nstart with an upper case letter). Struct fields may have a\n`search:\"name,options\"` tag. The name must start with a letter and be\ncomposed only of word characters. If options is \"facet\" then the struct\nfield will be used as a document facet. If options is \"\" then the comma\nmay be omitted. There are no other recognized options.\n\nExample code:\n\n\t// A and B are renamed to a and b.\n\t// A, C and I are facets.\n\t// D's tag is equivalent to having no tag at all (E).\n\t// I has tag information for both the search and json packages.\n\ttype TaggedStruct struct {\n\t\tA float64 `search:\"a,facet\"`\n\t\tB float64 `search:\"b\"`\n\t\tC float64 `search:\",facet\"`\n\t\tD float64 `search:\"\"`\n\t\tE float64\n\t\tI float64 `search:\",facet\" json:\"i\"`\n\t}\n\n\nThe FieldLoadSaver Interface\n\nA document's contents can also be represented by any type that implements the\nFieldLoadSaver interface. This type may be a struct pointer, but it\ndoes not have to be. The search package will call Load when loading the\ndocument's contents, and Save when saving them. In addition to a slice of\nFields, the Load and Save methods also use the DocumentMetadata type to\nprovide additional information about a document (such as its Rank, or set of\nFacets). 
Possible uses for this interface include deriving non-stored fields,\nverifying fields or setting specific languages for string and HTML fields.\n\nExample code:\n\n\ttype CustomFieldsExample struct {\n\t\t// Item's title and which language it is in.\n\t\tTitle string\n\t\tLang  string\n\t\t// Mass, in grams.\n\t\tMass int\n\t}\n\n\tfunc (x *CustomFieldsExample) Load(fields []search.Field, meta *search.DocumentMetadata) error {\n\t\t// Load the title field, failing if any other field is found.\n\t\tfor _, f := range fields {\n\t\t\tif f.Name != \"title\" {\n\t\t\t\treturn fmt.Errorf(\"unknown field %q\", f.Name)\n\t\t\t}\n\t\t\ts, ok := f.Value.(string)\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"unsupported type %T for field %q\", f.Value, f.Name)\n\t\t\t}\n\t\t\tx.Title = s\n\t\t\tx.Lang = f.Language\n\t\t}\n\t\t// Load the mass facet, failing if any other facet is found.\n\t\tfor _, f := range meta.Facets {\n\t\t\tif f.Name != \"mass\" {\n\t\t\t\treturn fmt.Errorf(\"unknown facet %q\", f.Name)\n\t\t\t}\n\t\t\tm, ok := f.Value.(float64)\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"unsupported type %T for facet %q\", f.Value, f.Name)\n\t\t\t}\n\t\t\tx.Mass = int(m)\n\t\t}\n\t\treturn nil\n\t}\n\n\tfunc (x *CustomFieldsExample) Save() ([]search.Field, *search.DocumentMetadata, error) {\n\t\tfields := []search.Field{\n\t\t\t{Name: \"title\", Value: x.Title, Language: x.Lang},\n\t\t}\n\t\tmeta := &search.DocumentMetadata{\n\t\t\tFacets: []search.Facet{\n\t\t\t\t{Name: \"mass\", Value: float64(x.Mass)},\n\t\t\t},\n\t\t}\n\t\treturn fields, meta, nil\n\t}\n*/\npackage search\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/search/field.go",
    "content": "// Copyright 2014 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage search\n\n// Field is a name/value pair. A search index's document can be loaded and\n// saved as a sequence of Fields.\ntype Field struct {\n\t// Name is the field name. A valid field name matches /[A-Za-z][A-Za-z0-9_]*/.\n\tName string\n\t// Value is the field value. The valid types are:\n\t//  - string,\n\t//  - search.Atom,\n\t//  - search.HTML,\n\t//  - time.Time (stored with millisecond precision),\n\t//  - float64,\n\t//  - GeoPoint.\n\tValue interface{}\n\t// Language is a two-letter ISO 639-1 code for the field's language,\n\t// defaulting to \"en\" if nothing is specified. It may only be specified for\n\t// fields of type string and search.HTML.\n\tLanguage string\n\t// Derived marks fields that were calculated as a result of a\n\t// FieldExpression provided to Search. This field is ignored when saving a\n\t// document.\n\tDerived bool\n}\n\n// Facet is a name/value pair which is used to add categorical information to a\n// document.\ntype Facet struct {\n\t// Name is the facet name. A valid facet name matches /[A-Za-z][A-Za-z0-9_]*/.\n\t// A facet name cannot be longer than 500 characters.\n\tName string\n\t// Value is the facet value.\n\t//\n\t// When being used in documents (for example, in\n\t// DocumentMetadata.Facets), the valid types are:\n\t//  - search.Atom,\n\t//  - float64.\n\t//\n\t// When being used in SearchOptions.Refinements or being returned\n\t// in FacetResult, the valid types are:\n\t//  - search.Atom,\n\t//  - search.Range.\n\tValue interface{}\n}\n\n// DocumentMetadata is a struct containing information describing a given document.\ntype DocumentMetadata struct {\n\t// Rank is an integer specifying the order the document will be returned in\n\t// search results. 
If zero, the rank will be set to the number of seconds since\n\t// 2011-01-01 00:00:00 UTC when being Put into an index.\n\tRank int\n\t// Facets is the set of facets for this document.\n\tFacets []Facet\n}\n\n// FieldLoadSaver can be converted from and to a slice of Fields\n// with additional document metadata.\ntype FieldLoadSaver interface {\n\tLoad([]Field, *DocumentMetadata) error\n\tSave() ([]Field, *DocumentMetadata, error)\n}\n\n// FieldList converts a []Field to implement FieldLoadSaver.\ntype FieldList []Field\n\n// Load loads all of the provided fields into l.\n// It does not first reset *l to an empty slice.\nfunc (l *FieldList) Load(f []Field, _ *DocumentMetadata) error {\n\t*l = append(*l, f...)\n\treturn nil\n}\n\n// Save returns all of l's fields as a slice of Fields.\nfunc (l *FieldList) Save() ([]Field, *DocumentMetadata, error) {\n\treturn *l, nil, nil\n}\n\nvar _ FieldLoadSaver = (*FieldList)(nil)\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/search/search.go",
    "content": "// Copyright 2012 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage search\n\n// TODO: let Put specify the document language: \"en\", \"fr\", etc. Also: order_id?? storage??\n// TODO: Index.GetAll (or Iterator.GetAll)?\n// TODO: struct <-> protobuf tests.\n// TODO: enforce Python's MIN_NUMBER_VALUE and MIN_DATE (which would disallow a zero\n// time.Time)? _MAXIMUM_STRING_LENGTH?\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode/utf8\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/search\"\n)\n\nvar (\n\t// ErrInvalidDocumentType is returned when methods like Put, Get or Next\n\t// are passed a dst or src argument of invalid type.\n\tErrInvalidDocumentType = errors.New(\"search: invalid document type\")\n\n\t// ErrNoSuchDocument is returned when no document was found for a given ID.\n\tErrNoSuchDocument = errors.New(\"search: no such document\")\n)\n\n// Atom is a document field whose contents are indexed as a single indivisible\n// string.\ntype Atom string\n\n// HTML is a document field whose contents are indexed as HTML. 
Only text nodes\n// are indexed: \"foo<b>bar\" will be treated as \"foobar\".\ntype HTML string\n\n// validIndexNameOrDocID is the Go equivalent of Python's\n// _ValidateVisiblePrintableAsciiNotReserved.\nfunc validIndexNameOrDocID(s string) bool {\n\tif strings.HasPrefix(s, \"!\") {\n\t\treturn false\n\t}\n\tfor _, c := range s {\n\t\tif c < 0x21 || 0x7f <= c {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nvar (\n\tfieldNameRE = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_]*$`)\n\tlanguageRE  = regexp.MustCompile(`^[a-z]{2}$`)\n)\n\n// validFieldName is the Go equivalent of Python's _CheckFieldName. It checks\n// the validity of both field and facet names.\nfunc validFieldName(s string) bool {\n\treturn len(s) <= 500 && fieldNameRE.MatchString(s)\n}\n\n// validDocRank checks that the ranks is in the range [0, 2^31).\nfunc validDocRank(r int) bool {\n\treturn 0 <= r && r <= (1<<31-1)\n}\n\n// validLanguage checks that a language looks like ISO 639-1.\nfunc validLanguage(s string) bool {\n\treturn languageRE.MatchString(s)\n}\n\n// validFloat checks that f is in the range [-2147483647, 2147483647].\nfunc validFloat(f float64) bool {\n\treturn -(1<<31-1) <= f && f <= (1<<31-1)\n}\n\n// Index is an index of documents.\ntype Index struct {\n\tspec pb.IndexSpec\n}\n\n// orderIDEpoch forms the basis for populating OrderId on documents.\nvar orderIDEpoch = time.Date(2011, 1, 1, 0, 0, 0, 0, time.UTC)\n\n// Open opens the index with the given name. The index is created if it does\n// not already exist.\n//\n// The name is a human-readable ASCII string. It must contain no whitespace\n// characters and not start with \"!\".\nfunc Open(name string) (*Index, error) {\n\tif !validIndexNameOrDocID(name) {\n\t\treturn nil, fmt.Errorf(\"search: invalid index name %q\", name)\n\t}\n\treturn &Index{\n\t\tspec: pb.IndexSpec{\n\t\t\tName: &name,\n\t\t},\n\t}, nil\n}\n\n// Put saves src to the index. If id is empty, a new ID is allocated by the\n// service and returned. 
If id is not empty, any existing index entry for that\n// ID is replaced.\n//\n// The ID is a human-readable ASCII string. It must contain no whitespace\n// characters and not start with \"!\".\n//\n// src must be a non-nil struct pointer or implement the FieldLoadSaver\n// interface.\nfunc (x *Index) Put(c context.Context, id string, src interface{}) (string, error) {\n\td, err := saveDoc(src)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif id != \"\" {\n\t\tif !validIndexNameOrDocID(id) {\n\t\t\treturn \"\", fmt.Errorf(\"search: invalid ID %q\", id)\n\t\t}\n\t\td.Id = proto.String(id)\n\t}\n\treq := &pb.IndexDocumentRequest{\n\t\tParams: &pb.IndexDocumentParams{\n\t\t\tDocument:  []*pb.Document{d},\n\t\t\tIndexSpec: &x.spec,\n\t\t},\n\t}\n\tres := &pb.IndexDocumentResponse{}\n\tif err := internal.Call(c, \"search\", \"IndexDocument\", req, res); err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(res.Status) > 0 {\n\t\tif s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK {\n\t\t\treturn \"\", fmt.Errorf(\"search: %s: %s\", s.GetCode(), s.GetErrorDetail())\n\t\t}\n\t}\n\tif len(res.Status) != 1 || len(res.DocId) != 1 {\n\t\treturn \"\", fmt.Errorf(\"search: internal error: wrong number of results (%d Statuses, %d DocIDs)\",\n\t\t\tlen(res.Status), len(res.DocId))\n\t}\n\treturn res.DocId[0], nil\n}\n\n// Get loads the document with the given ID into dst.\n//\n// The ID is a human-readable ASCII string. It must be non-empty, contain no\n// whitespace characters and not start with \"!\".\n//\n// dst must be a non-nil struct pointer or implement the FieldLoadSaver\n// interface.\n//\n// ErrFieldMismatch is returned when a field is to be loaded into a different\n// type than the one it was stored from, or when a field is missing or\n// unexported in the destination struct. ErrFieldMismatch is only returned if\n// dst is a struct pointer. 
It is up to the callee to decide whether this error\n// is fatal, recoverable, or ignorable.\nfunc (x *Index) Get(c context.Context, id string, dst interface{}) error {\n\tif id == \"\" || !validIndexNameOrDocID(id) {\n\t\treturn fmt.Errorf(\"search: invalid ID %q\", id)\n\t}\n\treq := &pb.ListDocumentsRequest{\n\t\tParams: &pb.ListDocumentsParams{\n\t\t\tIndexSpec:  &x.spec,\n\t\t\tStartDocId: proto.String(id),\n\t\t\tLimit:      proto.Int32(1),\n\t\t},\n\t}\n\tres := &pb.ListDocumentsResponse{}\n\tif err := internal.Call(c, \"search\", \"ListDocuments\", req, res); err != nil {\n\t\treturn err\n\t}\n\tif res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {\n\t\treturn fmt.Errorf(\"search: %s: %s\", res.Status.GetCode(), res.Status.GetErrorDetail())\n\t}\n\tif len(res.Document) != 1 || res.Document[0].GetId() != id {\n\t\treturn ErrNoSuchDocument\n\t}\n\treturn loadDoc(dst, res.Document[0], nil)\n}\n\n// Delete deletes a document from the index.\nfunc (x *Index) Delete(c context.Context, id string) error {\n\treq := &pb.DeleteDocumentRequest{\n\t\tParams: &pb.DeleteDocumentParams{\n\t\t\tDocId:     []string{id},\n\t\t\tIndexSpec: &x.spec,\n\t\t},\n\t}\n\tres := &pb.DeleteDocumentResponse{}\n\tif err := internal.Call(c, \"search\", \"DeleteDocument\", req, res); err != nil {\n\t\treturn err\n\t}\n\tif len(res.Status) != 1 {\n\t\treturn fmt.Errorf(\"search: internal error: wrong number of results (%d)\", len(res.Status))\n\t}\n\tif s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK {\n\t\treturn fmt.Errorf(\"search: %s: %s\", s.GetCode(), s.GetErrorDetail())\n\t}\n\treturn nil\n}\n\n// List lists all of the documents in an index. 
The documents are returned in\n// increasing ID order.\nfunc (x *Index) List(c context.Context, opts *ListOptions) *Iterator {\n\tt := &Iterator{\n\t\tc:             c,\n\t\tindex:         x,\n\t\tcount:         -1,\n\t\tlistInclusive: true,\n\t\tmore:          moreList,\n\t}\n\tif opts != nil {\n\t\tt.listStartID = opts.StartID\n\t\tt.limit = opts.Limit\n\t\tt.idsOnly = opts.IDsOnly\n\t}\n\treturn t\n}\n\nfunc moreList(t *Iterator) error {\n\treq := &pb.ListDocumentsRequest{\n\t\tParams: &pb.ListDocumentsParams{\n\t\t\tIndexSpec: &t.index.spec,\n\t\t},\n\t}\n\tif t.listStartID != \"\" {\n\t\treq.Params.StartDocId = &t.listStartID\n\t\treq.Params.IncludeStartDoc = &t.listInclusive\n\t}\n\tif t.limit > 0 {\n\t\treq.Params.Limit = proto.Int32(int32(t.limit))\n\t}\n\tif t.idsOnly {\n\t\treq.Params.KeysOnly = &t.idsOnly\n\t}\n\n\tres := &pb.ListDocumentsResponse{}\n\tif err := internal.Call(t.c, \"search\", \"ListDocuments\", req, res); err != nil {\n\t\treturn err\n\t}\n\tif res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {\n\t\treturn fmt.Errorf(\"search: %s: %s\", res.Status.GetCode(), res.Status.GetErrorDetail())\n\t}\n\tt.listRes = res.Document\n\tt.listStartID, t.listInclusive, t.more = \"\", false, nil\n\tif len(res.Document) != 0 && t.limit <= 0 {\n\t\tif id := res.Document[len(res.Document)-1].GetId(); id != \"\" {\n\t\t\tt.listStartID, t.more = id, moreList\n\t\t}\n\t}\n\treturn nil\n}\n\n// ListOptions are the options for listing documents in an index. Passing a nil\n// *ListOptions is equivalent to using the default values.\ntype ListOptions struct {\n\t// StartID is the inclusive lower bound for the ID of the returned\n\t// documents. The zero value means all documents will be returned.\n\tStartID string\n\n\t// Limit is the maximum number of documents to return. 
The zero value\n\t// indicates no limit.\n\tLimit int\n\n\t// IDsOnly indicates that only document IDs should be returned for the list\n\t// operation; no document fields are populated.\n\tIDsOnly bool\n}\n\n// Search searches the index for the given query.\nfunc (x *Index) Search(c context.Context, query string, opts *SearchOptions) *Iterator {\n\tt := &Iterator{\n\t\tc:           c,\n\t\tindex:       x,\n\t\tsearchQuery: query,\n\t\tmore:        moreSearch,\n\t}\n\tif opts != nil {\n\t\tif opts.Cursor != \"\" {\n\t\t\tif opts.Offset != 0 {\n\t\t\t\treturn errIter(\"at most one of Cursor and Offset may be specified\")\n\t\t\t}\n\t\t\tt.searchCursor = proto.String(string(opts.Cursor))\n\t\t}\n\t\tt.limit = opts.Limit\n\t\tt.fields = opts.Fields\n\t\tt.idsOnly = opts.IDsOnly\n\t\tt.sort = opts.Sort\n\t\tt.exprs = opts.Expressions\n\t\tt.refinements = opts.Refinements\n\t\tt.facetOpts = opts.Facets\n\t\tt.searchOffset = opts.Offset\n\t}\n\treturn t\n}\n\nfunc moreSearch(t *Iterator) error {\n\t// We use per-result (rather than single/per-page) cursors since this\n\t// lets us return a Cursor for every iterator document. 
The two cursor\n\t// types are largely interchangeable: a page cursor is the same as the\n\t// last per-result cursor in a given search response.\n\treq := &pb.SearchRequest{\n\t\tParams: &pb.SearchParams{\n\t\t\tIndexSpec:  &t.index.spec,\n\t\t\tQuery:      &t.searchQuery,\n\t\t\tCursor:     t.searchCursor,\n\t\t\tCursorType: pb.SearchParams_PER_RESULT.Enum(),\n\t\t\tFieldSpec: &pb.FieldSpec{\n\t\t\t\tName: t.fields,\n\t\t\t},\n\t\t},\n\t}\n\tif t.limit > 0 {\n\t\treq.Params.Limit = proto.Int32(int32(t.limit))\n\t}\n\tif t.searchOffset > 0 {\n\t\treq.Params.Offset = proto.Int32(int32(t.searchOffset))\n\t\tt.searchOffset = 0\n\t}\n\tif t.idsOnly {\n\t\treq.Params.KeysOnly = &t.idsOnly\n\t}\n\tif t.sort != nil {\n\t\tif err := sortToProto(t.sort, req.Params); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif t.refinements != nil {\n\t\tif err := refinementsToProto(t.refinements, req.Params); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, e := range t.exprs {\n\t\treq.Params.FieldSpec.Expression = append(req.Params.FieldSpec.Expression, &pb.FieldSpec_Expression{\n\t\t\tName:       proto.String(e.Name),\n\t\t\tExpression: proto.String(e.Expr),\n\t\t})\n\t}\n\tfor _, f := range t.facetOpts {\n\t\tif err := f.setParams(req.Params); err != nil {\n\t\t\treturn fmt.Errorf(\"bad FacetSearchOption: %v\", err)\n\t\t}\n\t}\n\t// Don't repeat facet search.\n\tt.facetOpts = nil\n\n\tres := &pb.SearchResponse{}\n\tif err := internal.Call(t.c, \"search\", \"Search\", req, res); err != nil {\n\t\treturn err\n\t}\n\tif res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {\n\t\treturn fmt.Errorf(\"search: %s: %s\", res.Status.GetCode(), res.Status.GetErrorDetail())\n\t}\n\tt.searchRes = res.Result\n\tif len(res.FacetResult) > 0 {\n\t\tt.facetRes = res.FacetResult\n\t}\n\tt.count = int(*res.MatchedCount)\n\tif t.limit > 0 {\n\t\tt.more = nil\n\t} else {\n\t\tt.more = moreSearch\n\t}\n\treturn nil\n}\n\n// SearchOptions are the options for searching an index. 
Passing a nil\n// *SearchOptions is equivalent to using the default values.\ntype SearchOptions struct {\n\t// Limit is the maximum number of documents to return. The zero value\n\t// indicates no limit.\n\tLimit int\n\n\t// IDsOnly indicates that only document IDs should be returned for the search\n\t// operation; no document fields are populated.\n\tIDsOnly bool\n\n\t// Sort controls the ordering of search results.\n\tSort *SortOptions\n\n\t// Fields specifies which document fields to include in the results. If omitted,\n\t// all document fields are returned. No more than 100 fields may be specified.\n\tFields []string\n\n\t// Expressions specifies additional computed fields to add to each returned\n\t// document.\n\tExpressions []FieldExpression\n\n\t// Facets controls what facet information is returned for these search results.\n\t// If no options are specified, no facet results will be returned.\n\tFacets []FacetSearchOption\n\n\t// Refinements filters the returned documents by requiring them to contain facets\n\t// with specific values. Refinements are applied in conjunction for facets with\n\t// different names, and in disjunction otherwise.\n\tRefinements []Facet\n\n\t// Cursor causes the results to commence with the first document after\n\t// the document associated with the cursor.\n\tCursor Cursor\n\n\t// Offset specifies the number of documents to skip over before returning results.\n\t// When specified, Cursor must be nil.\n\tOffset int\n}\n\n// Cursor represents an iterator's position.\n//\n// The string value of a cursor is web-safe. 
It can be saved and restored\n// for later use.\ntype Cursor string\n\n// FieldExpression defines a custom expression to evaluate for each result.\ntype FieldExpression struct {\n\t// Name is the name to use for the computed field.\n\tName string\n\n\t// Expr is evaluated to provide a custom content snippet for each document.\n\t// See https://cloud.google.com/appengine/docs/go/search/options for\n\t// the supported expression syntax.\n\tExpr string\n}\n\n// FacetSearchOption controls what facet information is returned in search results.\ntype FacetSearchOption interface {\n\tsetParams(*pb.SearchParams) error\n}\n\n// AutoFacetDiscovery returns a FacetSearchOption which enables automatic facet\n// discovery for the search. Automatic facet discovery looks for the facets\n// which appear the most often in the aggregate in the matched documents.\n//\n// The maximum number of facets returned is controlled by facetLimit, and the\n// maximum number of values per facet by facetLimit. A limit of zero indicates\n// a default limit should be used.\nfunc AutoFacetDiscovery(facetLimit, valueLimit int) FacetSearchOption {\n\treturn &autoFacetOpt{facetLimit, valueLimit}\n}\n\ntype autoFacetOpt struct {\n\tfacetLimit, valueLimit int\n}\n\nconst defaultAutoFacetLimit = 10 // As per python runtime search.py.\n\nfunc (o *autoFacetOpt) setParams(params *pb.SearchParams) error {\n\tlim := int32(o.facetLimit)\n\tif lim == 0 {\n\t\tlim = defaultAutoFacetLimit\n\t}\n\tparams.AutoDiscoverFacetCount = &lim\n\tif o.valueLimit > 0 {\n\t\tparams.FacetAutoDetectParam = &pb.FacetAutoDetectParam{\n\t\t\tValueLimit: proto.Int32(int32(o.valueLimit)),\n\t\t}\n\t}\n\treturn nil\n}\n\n// FacetDiscovery returns a FacetSearchOption which selects a facet to be\n// returned with the search results. By default, the most frequently\n// occurring values for that facet will be returned. 
However, you can also\n// specify a list of particular Atoms or specific Ranges to return.\nfunc FacetDiscovery(name string, value ...interface{}) FacetSearchOption {\n\treturn &facetOpt{name, value}\n}\n\ntype facetOpt struct {\n\tname   string\n\tvalues []interface{}\n}\n\nfunc (o *facetOpt) setParams(params *pb.SearchParams) error {\n\treq := &pb.FacetRequest{Name: &o.name}\n\tparams.IncludeFacet = append(params.IncludeFacet, req)\n\tif len(o.values) == 0 {\n\t\treturn nil\n\t}\n\tvtype := reflect.TypeOf(o.values[0])\n\treqParam := &pb.FacetRequestParam{}\n\tfor _, v := range o.values {\n\t\tif reflect.TypeOf(v) != vtype {\n\t\t\treturn errors.New(\"values must all be Atom, or must all be Range\")\n\t\t}\n\t\tswitch v := v.(type) {\n\t\tcase Atom:\n\t\t\treqParam.ValueConstraint = append(reqParam.ValueConstraint, string(v))\n\t\tcase Range:\n\t\t\trng, err := rangeToProto(v)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"invalid range: %v\", err)\n\t\t\t}\n\t\t\treqParam.Range = append(reqParam.Range, rng)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unsupported value type %T\", v)\n\t\t}\n\t}\n\treq.Params = reqParam\n\treturn nil\n}\n\n// FacetDocumentDepth returns a FacetSearchOption which controls the number of\n// documents to be evaluated with preparing facet results.\nfunc FacetDocumentDepth(depth int) FacetSearchOption {\n\treturn facetDepthOpt(depth)\n}\n\ntype facetDepthOpt int\n\nfunc (o facetDepthOpt) setParams(params *pb.SearchParams) error {\n\tparams.FacetDepth = proto.Int32(int32(o))\n\treturn nil\n}\n\n// FacetResult represents the number of times a particular facet and value\n// appeared in the documents matching a search request.\ntype FacetResult struct {\n\tFacet\n\n\t// Count is the number of times this specific facet and value appeared in the\n\t// matching documents.\n\tCount int\n}\n\n// Range represents a numeric range with inclusive start and exclusive end.\n// Start may be specified as math.Inf(-1) to indicate there is no minimum\n// 
value, and End may similarly be specified as math.Inf(1); at least one of\n// Start or End must be a finite number.\ntype Range struct {\n\tStart, End float64\n}\n\nvar (\n\tnegInf = math.Inf(-1)\n\tposInf = math.Inf(1)\n)\n\n// AtLeast returns a Range matching any value greater than, or equal to, min.\nfunc AtLeast(min float64) Range {\n\treturn Range{Start: min, End: posInf}\n}\n\n// LessThan returns a Range matching any value less than max.\nfunc LessThan(max float64) Range {\n\treturn Range{Start: negInf, End: max}\n}\n\n// SortOptions control the ordering and scoring of search results.\ntype SortOptions struct {\n\t// Expressions is a slice of expressions representing a multi-dimensional\n\t// sort.\n\tExpressions []SortExpression\n\n\t// Scorer, when specified, will cause the documents to be scored according to\n\t// search term frequency.\n\tScorer Scorer\n\n\t// Limit is the maximum number of objects to score and/or sort. Limit cannot\n\t// be more than 10,000. The zero value indicates a default limit.\n\tLimit int\n}\n\n// SortExpression defines a single dimension for sorting a document.\ntype SortExpression struct {\n\t// Expr is evaluated to provide a sorting value for each document.\n\t// See https://cloud.google.com/appengine/docs/go/search/options for\n\t// the supported expression syntax.\n\tExpr string\n\n\t// Reverse causes the documents to be sorted in ascending order.\n\tReverse bool\n\n\t// The default value to use when no field is present or the expresion\n\t// cannot be calculated for a document. 
For text sorts, Default must\n\t// be of type string; for numeric sorts, float64.\n\tDefault interface{}\n}\n\n// A Scorer defines how a document is scored.\ntype Scorer interface {\n\ttoProto(*pb.ScorerSpec)\n}\n\ntype enumScorer struct {\n\tenum pb.ScorerSpec_Scorer\n}\n\nfunc (e enumScorer) toProto(spec *pb.ScorerSpec) {\n\tspec.Scorer = e.enum.Enum()\n}\n\nvar (\n\t// MatchScorer assigns a score based on term frequency in a document.\n\tMatchScorer Scorer = enumScorer{pb.ScorerSpec_MATCH_SCORER}\n\n\t// RescoringMatchScorer assigns a score based on the quality of the query\n\t// match. It is similar to a MatchScorer but uses a more complex scoring\n\t// algorithm based on match term frequency and other factors like field type.\n\t// Please be aware that this algorithm is continually refined and can change\n\t// over time without notice. This means that the ordering of search results\n\t// that use this scorer can also change without notice.\n\tRescoringMatchScorer Scorer = enumScorer{pb.ScorerSpec_RESCORING_MATCH_SCORER}\n)\n\nfunc sortToProto(sort *SortOptions, params *pb.SearchParams) error {\n\tfor _, e := range sort.Expressions {\n\t\tspec := &pb.SortSpec{\n\t\t\tSortExpression: proto.String(e.Expr),\n\t\t}\n\t\tif e.Reverse {\n\t\t\tspec.SortDescending = proto.Bool(false)\n\t\t}\n\t\tif e.Default != nil {\n\t\t\tswitch d := e.Default.(type) {\n\t\t\tcase float64:\n\t\t\t\tspec.DefaultValueNumeric = &d\n\t\t\tcase string:\n\t\t\t\tspec.DefaultValueText = &d\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"search: invalid Default type %T for expression %q\", d, e.Expr)\n\t\t\t}\n\t\t}\n\t\tparams.SortSpec = append(params.SortSpec, spec)\n\t}\n\n\tspec := &pb.ScorerSpec{}\n\tif sort.Limit > 0 {\n\t\tspec.Limit = proto.Int32(int32(sort.Limit))\n\t\tparams.ScorerSpec = spec\n\t}\n\tif sort.Scorer != nil {\n\t\tsort.Scorer.toProto(spec)\n\t\tparams.ScorerSpec = spec\n\t}\n\n\treturn nil\n}\n\nfunc refinementsToProto(refinements []Facet, params *pb.SearchParams) 
error {\n\tfor _, r := range refinements {\n\t\tref := &pb.FacetRefinement{\n\t\t\tName: proto.String(r.Name),\n\t\t}\n\t\tswitch v := r.Value.(type) {\n\t\tcase Atom:\n\t\t\tref.Value = proto.String(string(v))\n\t\tcase Range:\n\t\t\trng, err := rangeToProto(v)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"search: refinement for facet %q: %v\", r.Name, err)\n\t\t\t}\n\t\t\t// Unfortunately there are two identical messages for identify Facet ranges.\n\t\t\tref.Range = &pb.FacetRefinement_Range{Start: rng.Start, End: rng.End}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"search: unsupported refinement for facet %q of type %T\", r.Name, v)\n\t\t}\n\t\tparams.FacetRefinement = append(params.FacetRefinement, ref)\n\t}\n\treturn nil\n}\n\nfunc rangeToProto(r Range) (*pb.FacetRange, error) {\n\trng := &pb.FacetRange{}\n\tif r.Start != negInf {\n\t\tif !validFloat(r.Start) {\n\t\t\treturn nil, errors.New(\"invalid value for Start\")\n\t\t}\n\t\trng.Start = proto.String(strconv.FormatFloat(r.Start, 'e', -1, 64))\n\t} else if r.End == posInf {\n\t\treturn nil, errors.New(\"either Start or End must be finite\")\n\t}\n\tif r.End != posInf {\n\t\tif !validFloat(r.End) {\n\t\t\treturn nil, errors.New(\"invalid value for End\")\n\t\t}\n\t\trng.End = proto.String(strconv.FormatFloat(r.End, 'e', -1, 64))\n\t}\n\treturn rng, nil\n}\n\nfunc protoToRange(rng *pb.FacetRefinement_Range) Range {\n\tr := Range{Start: negInf, End: posInf}\n\tif x, err := strconv.ParseFloat(rng.GetStart(), 64); err != nil {\n\t\tr.Start = x\n\t}\n\tif x, err := strconv.ParseFloat(rng.GetEnd(), 64); err != nil {\n\t\tr.End = x\n\t}\n\treturn r\n}\n\n// Iterator is the result of searching an index for a query or listing an\n// index.\ntype Iterator struct {\n\tc     context.Context\n\tindex *Index\n\terr   error\n\n\tlistRes       []*pb.Document\n\tlistStartID   string\n\tlistInclusive bool\n\n\tsearchRes    []*pb.SearchResult\n\tfacetRes     []*pb.FacetResult\n\tsearchQuery  string\n\tsearchCursor 
*string\n\tsearchOffset int\n\tsort         *SortOptions\n\n\tfields      []string\n\texprs       []FieldExpression\n\trefinements []Facet\n\tfacetOpts   []FacetSearchOption\n\n\tmore func(*Iterator) error\n\n\tcount   int\n\tlimit   int // items left to return; 0 for unlimited.\n\tidsOnly bool\n}\n\n// errIter returns an iterator that only returns the given error.\nfunc errIter(err string) *Iterator {\n\treturn &Iterator{\n\t\terr: errors.New(err),\n\t}\n}\n\n// Done is returned when a query iteration has completed.\nvar Done = errors.New(\"search: query has no more results\")\n\n// Count returns an approximation of the number of documents matched by the\n// query. It is only valid to call for iterators returned by Search.\nfunc (t *Iterator) Count() int { return t.count }\n\n// fetchMore retrieves more results, if there are no errors or pending results.\nfunc (t *Iterator) fetchMore() {\n\tif t.err == nil && len(t.listRes)+len(t.searchRes) == 0 && t.more != nil {\n\t\tt.err = t.more(t)\n\t}\n}\n\n// Next returns the ID of the next result. When there are no more results,\n// Done is returned as the error.\n//\n// dst must be a non-nil struct pointer, implement the FieldLoadSaver\n// interface, or be a nil interface value. If a non-nil dst is provided, it\n// will be filled with the indexed fields. 
dst is ignored if this iterator was\n// created with an IDsOnly option.\nfunc (t *Iterator) Next(dst interface{}) (string, error) {\n\tt.fetchMore()\n\tif t.err != nil {\n\t\treturn \"\", t.err\n\t}\n\n\tvar doc *pb.Document\n\tvar exprs []*pb.Field\n\tswitch {\n\tcase len(t.listRes) != 0:\n\t\tdoc = t.listRes[0]\n\t\tt.listRes = t.listRes[1:]\n\tcase len(t.searchRes) != 0:\n\t\tdoc = t.searchRes[0].Document\n\t\texprs = t.searchRes[0].Expression\n\t\tt.searchCursor = t.searchRes[0].Cursor\n\t\tt.searchRes = t.searchRes[1:]\n\tdefault:\n\t\treturn \"\", Done\n\t}\n\tif doc == nil {\n\t\treturn \"\", errors.New(\"search: internal error: no document returned\")\n\t}\n\tif !t.idsOnly && dst != nil {\n\t\tif err := loadDoc(dst, doc, exprs); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn doc.GetId(), nil\n}\n\n// Cursor returns the cursor associated with the current document (that is,\n// the document most recently returned by a call to Next).\n//\n// Passing this cursor in a future call to Search will cause those results\n// to commence with the first document after the current document.\nfunc (t *Iterator) Cursor() Cursor {\n\tif t.searchCursor == nil {\n\t\treturn \"\"\n\t}\n\treturn Cursor(*t.searchCursor)\n}\n\n// Facets returns the facets found within the search results, if any facets\n// were requested in the SearchOptions.\nfunc (t *Iterator) Facets() ([][]FacetResult, error) {\n\tt.fetchMore()\n\tif t.err != nil && t.err != Done {\n\t\treturn nil, t.err\n\t}\n\n\tvar facets [][]FacetResult\n\tfor _, f := range t.facetRes {\n\t\tfres := make([]FacetResult, 0, len(f.Value))\n\t\tfor _, v := range f.Value {\n\t\t\tref := v.Refinement\n\t\t\tfacet := FacetResult{\n\t\t\t\tFacet: Facet{Name: ref.GetName()},\n\t\t\t\tCount: int(v.GetCount()),\n\t\t\t}\n\t\t\tif ref.Value != nil {\n\t\t\t\tfacet.Value = Atom(*ref.Value)\n\t\t\t} else {\n\t\t\t\tfacet.Value = protoToRange(ref.Range)\n\t\t\t}\n\t\t\tfres = append(fres, facet)\n\t\t}\n\t\tfacets = 
append(facets, fres)\n\t}\n\treturn facets, nil\n}\n\n// saveDoc converts from a struct pointer or\n// FieldLoadSaver/FieldMetadataLoadSaver to the Document protobuf.\nfunc saveDoc(src interface{}) (*pb.Document, error) {\n\tvar err error\n\tvar fields []Field\n\tvar meta *DocumentMetadata\n\tswitch x := src.(type) {\n\tcase FieldLoadSaver:\n\t\tfields, meta, err = x.Save()\n\tdefault:\n\t\tfields, err = SaveStruct(src)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfieldsProto, err := fieldsToProto(fields)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td := &pb.Document{\n\t\tField:   fieldsProto,\n\t\tOrderId: proto.Int32(int32(time.Since(orderIDEpoch).Seconds())),\n\t}\n\tif meta != nil {\n\t\tif meta.Rank != 0 {\n\t\t\tif !validDocRank(meta.Rank) {\n\t\t\t\treturn nil, fmt.Errorf(\"search: invalid rank %d, must be [0, 2^31)\", meta.Rank)\n\t\t\t}\n\t\t\t*d.OrderId = int32(meta.Rank)\n\t\t}\n\t\tif len(meta.Facets) > 0 {\n\t\t\tfacets, err := facetsToProto(meta.Facets)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\td.Facet = facets\n\t\t}\n\t}\n\treturn d, nil\n}\n\nfunc fieldsToProto(src []Field) ([]*pb.Field, error) {\n\t// Maps to catch duplicate time or numeric fields.\n\ttimeFields, numericFields := make(map[string]bool), make(map[string]bool)\n\tdst := make([]*pb.Field, 0, len(src))\n\tfor _, f := range src {\n\t\tif !validFieldName(f.Name) {\n\t\t\treturn nil, fmt.Errorf(\"search: invalid field name %q\", f.Name)\n\t\t}\n\t\tfieldValue := &pb.FieldValue{}\n\t\tswitch x := f.Value.(type) {\n\t\tcase string:\n\t\t\tfieldValue.Type = pb.FieldValue_TEXT.Enum()\n\t\t\tfieldValue.StringValue = proto.String(x)\n\t\tcase Atom:\n\t\t\tfieldValue.Type = pb.FieldValue_ATOM.Enum()\n\t\t\tfieldValue.StringValue = proto.String(string(x))\n\t\tcase HTML:\n\t\t\tfieldValue.Type = pb.FieldValue_HTML.Enum()\n\t\t\tfieldValue.StringValue = proto.String(string(x))\n\t\tcase time.Time:\n\t\t\tif timeFields[f.Name] {\n\t\t\t\treturn nil, 
fmt.Errorf(\"search: duplicate time field %q\", f.Name)\n\t\t\t}\n\t\t\ttimeFields[f.Name] = true\n\t\t\tfieldValue.Type = pb.FieldValue_DATE.Enum()\n\t\t\tfieldValue.StringValue = proto.String(strconv.FormatInt(x.UnixNano()/1e6, 10))\n\t\tcase float64:\n\t\t\tif numericFields[f.Name] {\n\t\t\t\treturn nil, fmt.Errorf(\"search: duplicate numeric field %q\", f.Name)\n\t\t\t}\n\t\t\tif !validFloat(x) {\n\t\t\t\treturn nil, fmt.Errorf(\"search: numeric field %q with invalid value %f\", f.Name, x)\n\t\t\t}\n\t\t\tnumericFields[f.Name] = true\n\t\t\tfieldValue.Type = pb.FieldValue_NUMBER.Enum()\n\t\t\tfieldValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64))\n\t\tcase appengine.GeoPoint:\n\t\t\tif !x.Valid() {\n\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\"search: GeoPoint field %q with invalid value %v\",\n\t\t\t\t\tf.Name, x)\n\t\t\t}\n\t\t\tfieldValue.Type = pb.FieldValue_GEO.Enum()\n\t\t\tfieldValue.Geo = &pb.FieldValue_Geo{\n\t\t\t\tLat: proto.Float64(x.Lat),\n\t\t\t\tLng: proto.Float64(x.Lng),\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"search: unsupported field type: %v\", reflect.TypeOf(f.Value))\n\t\t}\n\t\tif f.Language != \"\" {\n\t\t\tswitch f.Value.(type) {\n\t\t\tcase string, HTML:\n\t\t\t\tif !validLanguage(f.Language) {\n\t\t\t\t\treturn nil, fmt.Errorf(\"search: invalid language for field %q: %q\", f.Name, f.Language)\n\t\t\t\t}\n\t\t\t\tfieldValue.Language = proto.String(f.Language)\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"search: setting language not supported for field %q of type %T\", f.Name, f.Value)\n\t\t\t}\n\t\t}\n\t\tif p := fieldValue.StringValue; p != nil && !utf8.ValidString(*p) {\n\t\t\treturn nil, fmt.Errorf(\"search: %q field is invalid UTF-8: %q\", f.Name, *p)\n\t\t}\n\t\tdst = append(dst, &pb.Field{\n\t\t\tName:  proto.String(f.Name),\n\t\t\tValue: fieldValue,\n\t\t})\n\t}\n\treturn dst, nil\n}\n\nfunc facetsToProto(src []Facet) ([]*pb.Facet, error) {\n\tdst := make([]*pb.Facet, 0, len(src))\n\tfor _, 
f := range src {\n\t\tif !validFieldName(f.Name) {\n\t\t\treturn nil, fmt.Errorf(\"search: invalid facet name %q\", f.Name)\n\t\t}\n\t\tfacetValue := &pb.FacetValue{}\n\t\tswitch x := f.Value.(type) {\n\t\tcase Atom:\n\t\t\tif !utf8.ValidString(string(x)) {\n\t\t\t\treturn nil, fmt.Errorf(\"search: %q facet is invalid UTF-8: %q\", f.Name, x)\n\t\t\t}\n\t\t\tfacetValue.Type = pb.FacetValue_ATOM.Enum()\n\t\t\tfacetValue.StringValue = proto.String(string(x))\n\t\tcase float64:\n\t\t\tif !validFloat(x) {\n\t\t\t\treturn nil, fmt.Errorf(\"search: numeric facet %q with invalid value %f\", f.Name, x)\n\t\t\t}\n\t\t\tfacetValue.Type = pb.FacetValue_NUMBER.Enum()\n\t\t\tfacetValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64))\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"search: unsupported facet type: %v\", reflect.TypeOf(f.Value))\n\t\t}\n\t\tdst = append(dst, &pb.Facet{\n\t\t\tName:  proto.String(f.Name),\n\t\t\tValue: facetValue,\n\t\t})\n\t}\n\treturn dst, nil\n}\n\n// loadDoc converts from protobufs to a struct pointer or\n// FieldLoadSaver/FieldMetadataLoadSaver. The src param provides the document's\n// stored fields and facets, and any document metadata.  
An additional slice of\n// fields, exprs, may optionally be provided to contain any derived expressions\n// requested by the developer.\nfunc loadDoc(dst interface{}, src *pb.Document, exprs []*pb.Field) (err error) {\n\tfields, err := protoToFields(src.Field)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfacets, err := protoToFacets(src.Facet)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(exprs) > 0 {\n\t\texprFields, err := protoToFields(exprs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Mark each field as derived.\n\t\tfor i := range exprFields {\n\t\t\texprFields[i].Derived = true\n\t\t}\n\t\tfields = append(fields, exprFields...)\n\t}\n\tmeta := &DocumentMetadata{\n\t\tRank:   int(src.GetOrderId()),\n\t\tFacets: facets,\n\t}\n\tswitch x := dst.(type) {\n\tcase FieldLoadSaver:\n\t\treturn x.Load(fields, meta)\n\tdefault:\n\t\treturn loadStructWithMeta(dst, fields, meta)\n\t}\n}\n\nfunc protoToFields(fields []*pb.Field) ([]Field, error) {\n\tdst := make([]Field, 0, len(fields))\n\tfor _, field := range fields {\n\t\tfieldValue := field.GetValue()\n\t\tf := Field{\n\t\t\tName: field.GetName(),\n\t\t}\n\t\tswitch fieldValue.GetType() {\n\t\tcase pb.FieldValue_TEXT:\n\t\t\tf.Value = fieldValue.GetStringValue()\n\t\t\tf.Language = fieldValue.GetLanguage()\n\t\tcase pb.FieldValue_ATOM:\n\t\t\tf.Value = Atom(fieldValue.GetStringValue())\n\t\tcase pb.FieldValue_HTML:\n\t\t\tf.Value = HTML(fieldValue.GetStringValue())\n\t\t\tf.Language = fieldValue.GetLanguage()\n\t\tcase pb.FieldValue_DATE:\n\t\t\tsv := fieldValue.GetStringValue()\n\t\t\tmillis, err := strconv.ParseInt(sv, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"search: internal error: bad time.Time encoding %q: %v\", sv, err)\n\t\t\t}\n\t\t\tf.Value = time.Unix(0, millis*1e6)\n\t\tcase pb.FieldValue_NUMBER:\n\t\t\tsv := fieldValue.GetStringValue()\n\t\t\tx, err := strconv.ParseFloat(sv, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tf.Value = x\n\t\tcase 
pb.FieldValue_GEO:\n\t\t\tgeoValue := fieldValue.GetGeo()\n\t\t\tgeoPoint := appengine.GeoPoint{geoValue.GetLat(), geoValue.GetLng()}\n\t\t\tif !geoPoint.Valid() {\n\t\t\t\treturn nil, fmt.Errorf(\"search: internal error: invalid GeoPoint encoding: %v\", geoPoint)\n\t\t\t}\n\t\t\tf.Value = geoPoint\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"search: internal error: unknown data type %s\", fieldValue.GetType())\n\t\t}\n\t\tdst = append(dst, f)\n\t}\n\treturn dst, nil\n}\n\nfunc protoToFacets(facets []*pb.Facet) ([]Facet, error) {\n\tif len(facets) == 0 {\n\t\treturn nil, nil\n\t}\n\tdst := make([]Facet, 0, len(facets))\n\tfor _, facet := range facets {\n\t\tfacetValue := facet.GetValue()\n\t\tf := Facet{\n\t\t\tName: facet.GetName(),\n\t\t}\n\t\tswitch facetValue.GetType() {\n\t\tcase pb.FacetValue_ATOM:\n\t\t\tf.Value = Atom(facetValue.GetStringValue())\n\t\tcase pb.FacetValue_NUMBER:\n\t\t\tsv := facetValue.GetStringValue()\n\t\t\tx, err := strconv.ParseFloat(sv, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tf.Value = x\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"search: internal error: unknown data type %s\", facetValue.GetType())\n\t\t}\n\t\tdst = append(dst, f)\n\t}\n\treturn dst, nil\n}\n\nfunc namespaceMod(m proto.Message, namespace string) {\n\tset := func(s **string) {\n\t\tif *s == nil {\n\t\t\t*s = &namespace\n\t\t}\n\t}\n\tswitch m := m.(type) {\n\tcase *pb.IndexDocumentRequest:\n\t\tset(&m.Params.IndexSpec.Namespace)\n\tcase *pb.ListDocumentsRequest:\n\t\tset(&m.Params.IndexSpec.Namespace)\n\tcase *pb.DeleteDocumentRequest:\n\t\tset(&m.Params.IndexSpec.Namespace)\n\tcase *pb.SearchRequest:\n\t\tset(&m.Params.IndexSpec.Namespace)\n\t}\n}\n\nfunc init() {\n\tinternal.RegisterErrorCodeMap(\"search\", pb.SearchServiceError_ErrorCode_name)\n\tinternal.NamespaceMods[\"search\"] = namespaceMod\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/search/struct.go",
    "content": "// Copyright 2015 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage search\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n)\n\n// ErrFieldMismatch is returned when a field is to be loaded into a different\n// than the one it was stored from, or when a field is missing or unexported in\n// the destination struct.\ntype ErrFieldMismatch struct {\n\tFieldName string\n\tReason    string\n}\n\nfunc (e *ErrFieldMismatch) Error() string {\n\treturn fmt.Sprintf(\"search: cannot load field %q: %s\", e.FieldName, e.Reason)\n}\n\n// ErrFacetMismatch is returned when a facet is to be loaded into a different\n// type than the one it was stored from, or when a field is missing or\n// unexported in the destination struct. StructType is the type of the struct\n// pointed to by the destination argument passed to Iterator.Next.\ntype ErrFacetMismatch struct {\n\tStructType reflect.Type\n\tFacetName  string\n\tReason     string\n}\n\nfunc (e *ErrFacetMismatch) Error() string {\n\treturn fmt.Sprintf(\"search: cannot load facet %q into a %q: %s\", e.FacetName, e.StructType, e.Reason)\n}\n\n// structCodec defines how to convert a given struct to/from a search document.\ntype structCodec struct {\n\t// byIndex returns the struct tag for the i'th struct field.\n\tbyIndex []structTag\n\n\t// fieldByName returns the index of the struct field for the given field name.\n\tfieldByName map[string]int\n\n\t// facetByName returns the index of the struct field for the given facet name,\n\tfacetByName map[string]int\n}\n\n// structTag holds a structured version of each struct field's parsed tag.\ntype structTag struct {\n\tname  string\n\tfacet bool\n}\n\nvar (\n\tcodecsMu sync.RWMutex\n\tcodecs   = map[reflect.Type]*structCodec{}\n)\n\nfunc loadCodec(t reflect.Type) (*structCodec, error) {\n\tcodecsMu.RLock()\n\tcodec, ok := codecs[t]\n\tcodecsMu.RUnlock()\n\tif 
ok {\n\t\treturn codec, nil\n\t}\n\n\tcodecsMu.Lock()\n\tdefer codecsMu.Unlock()\n\tif codec, ok := codecs[t]; ok {\n\t\treturn codec, nil\n\t}\n\n\tcodec = &structCodec{\n\t\tfieldByName: make(map[string]int),\n\t\tfacetByName: make(map[string]int),\n\t}\n\n\tfor i, I := 0, t.NumField(); i < I; i++ {\n\t\tf := t.Field(i)\n\t\tname, opts := f.Tag.Get(\"search\"), \"\"\n\t\tif i := strings.Index(name, \",\"); i != -1 {\n\t\t\tname, opts = name[:i], name[i+1:]\n\t\t}\n\t\t// TODO(davidday): Support name==\"-\" as per datastore.\n\t\tif name == \"\" {\n\t\t\tname = f.Name\n\t\t} else if !validFieldName(name) {\n\t\t\treturn nil, fmt.Errorf(\"search: struct tag has invalid field name: %q\", name)\n\t\t}\n\t\tfacet := opts == \"facet\"\n\t\tcodec.byIndex = append(codec.byIndex, structTag{name: name, facet: facet})\n\t\tif facet {\n\t\t\tcodec.facetByName[name] = i\n\t\t} else {\n\t\t\tcodec.fieldByName[name] = i\n\t\t}\n\t}\n\n\tcodecs[t] = codec\n\treturn codec, nil\n}\n\n// structFLS adapts a struct to be a FieldLoadSaver.\ntype structFLS struct {\n\tv     reflect.Value\n\tcodec *structCodec\n}\n\nfunc (s structFLS) Load(fields []Field, meta *DocumentMetadata) error {\n\tvar err error\n\tfor _, field := range fields {\n\t\ti, ok := s.codec.fieldByName[field.Name]\n\t\tif !ok {\n\t\t\t// Note the error, but keep going.\n\t\t\terr = &ErrFieldMismatch{\n\t\t\t\tFieldName: field.Name,\n\t\t\t\tReason:    \"no such struct field\",\n\t\t\t}\n\t\t\tcontinue\n\n\t\t}\n\t\tf := s.v.Field(i)\n\t\tif !f.CanSet() {\n\t\t\t// Note the error, but keep going.\n\t\t\terr = &ErrFieldMismatch{\n\t\t\t\tFieldName: field.Name,\n\t\t\t\tReason:    \"cannot set struct field\",\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tv := reflect.ValueOf(field.Value)\n\t\tif ft, vt := f.Type(), v.Type(); ft != vt {\n\t\t\terr = &ErrFieldMismatch{\n\t\t\t\tFieldName: field.Name,\n\t\t\t\tReason:    fmt.Sprintf(\"type mismatch: %v for %v data\", ft, vt),\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tf.Set(v)\n\t}\n\tif 
meta == nil {\n\t\treturn nil\n\t}\n\tfor _, facet := range meta.Facets {\n\t\ti, ok := s.codec.facetByName[facet.Name]\n\t\tif !ok {\n\t\t\t// Note the error, but keep going.\n\t\t\tif err == nil {\n\t\t\t\terr = &ErrFacetMismatch{\n\t\t\t\t\tStructType: s.v.Type(),\n\t\t\t\t\tFacetName:  facet.Name,\n\t\t\t\t\tReason:     \"no matching field found\",\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tf := s.v.Field(i)\n\t\tif !f.CanSet() {\n\t\t\t// Note the error, but keep going.\n\t\t\tif err == nil {\n\t\t\t\terr = &ErrFacetMismatch{\n\t\t\t\t\tStructType: s.v.Type(),\n\t\t\t\t\tFacetName:  facet.Name,\n\t\t\t\t\tReason:     \"unable to set unexported field of struct\",\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tv := reflect.ValueOf(facet.Value)\n\t\tif ft, vt := f.Type(), v.Type(); ft != vt {\n\t\t\tif err == nil {\n\t\t\t\terr = &ErrFacetMismatch{\n\t\t\t\t\tStructType: s.v.Type(),\n\t\t\t\t\tFacetName:  facet.Name,\n\t\t\t\t\tReason:     fmt.Sprintf(\"type mismatch: %v for %d data\", ft, vt),\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tf.Set(v)\n\t}\n\treturn err\n}\n\nfunc (s structFLS) Save() ([]Field, *DocumentMetadata, error) {\n\tfields := make([]Field, 0, len(s.codec.fieldByName))\n\tvar facets []Facet\n\tfor i, tag := range s.codec.byIndex {\n\t\tf := s.v.Field(i)\n\t\tif !f.CanSet() {\n\t\t\tcontinue\n\t\t}\n\t\tif tag.facet {\n\t\t\tfacets = append(facets, Facet{Name: tag.name, Value: f.Interface()})\n\t\t} else {\n\t\t\tfields = append(fields, Field{Name: tag.name, Value: f.Interface()})\n\t\t}\n\t}\n\treturn fields, &DocumentMetadata{Facets: facets}, nil\n}\n\n// newStructFLS returns a FieldLoadSaver for the struct pointer p.\nfunc newStructFLS(p interface{}) (FieldLoadSaver, error) {\n\tv := reflect.ValueOf(p)\n\tif v.Kind() != reflect.Ptr || v.IsNil() || v.Elem().Kind() != reflect.Struct {\n\t\treturn nil, ErrInvalidDocumentType\n\t}\n\tcodec, err := loadCodec(v.Elem().Type())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn 
structFLS{v.Elem(), codec}, nil\n}\n\nfunc loadStructWithMeta(dst interface{}, f []Field, meta *DocumentMetadata) error {\n\tx, err := newStructFLS(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn x.Load(f, meta)\n}\n\nfunc saveStructWithMeta(src interface{}) ([]Field, *DocumentMetadata, error) {\n\tx, err := newStructFLS(src)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn x.Save()\n}\n\n// LoadStruct loads the fields from f to dst. dst must be a struct pointer.\nfunc LoadStruct(dst interface{}, f []Field) error {\n\treturn loadStructWithMeta(dst, f, nil)\n}\n\n// SaveStruct returns the fields from src as a slice of Field.\n// src must be a struct pointer.\nfunc SaveStruct(src interface{}) ([]Field, error) {\n\tf, _, err := saveStructWithMeta(src)\n\treturn f, err\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/socket/doc.go",
    "content": "// Copyright 2012 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// Package socket provides outbound network sockets.\n//\n// This package is only required in the classic App Engine environment.\n// Applications running only in the Managed VM hosting environment should\n// use the standard library's net package.\npackage socket\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/socket/socket_classic.go",
    "content": "// Copyright 2012 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// +build appengine\n\npackage socket\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/appengine/internal\"\n\n\tpb \"google.golang.org/appengine/internal/socket\"\n)\n\n// Dial connects to the address addr on the network protocol.\n// The address format is host:port, where host may be a hostname or an IP address.\n// Known protocols are \"tcp\" and \"udp\".\n// The returned connection satisfies net.Conn, and is valid while ctx is valid;\n// if the connection is to be used after ctx becomes invalid, invoke SetContext\n// with the new context.\nfunc Dial(ctx context.Context, protocol, addr string) (*Conn, error) {\n\treturn DialTimeout(ctx, protocol, addr, 0)\n}\n\nvar ipFamilies = []pb.CreateSocketRequest_SocketFamily{\n\tpb.CreateSocketRequest_IPv4,\n\tpb.CreateSocketRequest_IPv6,\n}\n\n// DialTimeout is like Dial but takes a timeout.\n// The timeout includes name resolution, if required.\nfunc DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) {\n\tdialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn.\n\tif timeout > 0 {\n\t\tvar cancel context.CancelFunc\n\t\tdialCtx, cancel = context.WithTimeout(ctx, timeout)\n\t\tdefer cancel()\n\t}\n\n\thost, portStr, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tport, err := strconv.Atoi(portStr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"socket: bad port %q: %v\", portStr, err)\n\t}\n\n\tvar prot pb.CreateSocketRequest_SocketProtocol\n\tswitch protocol {\n\tcase \"tcp\":\n\t\tprot = pb.CreateSocketRequest_TCP\n\tcase \"udp\":\n\t\tprot = pb.CreateSocketRequest_UDP\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"socket: unknown 
protocol %q\", protocol)\n\t}\n\n\tpackedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"socket: failed resolving %q: %v\", host, err)\n\t}\n\tif len(packedAddrs) == 0 {\n\t\treturn nil, fmt.Errorf(\"no addresses for %q\", host)\n\t}\n\n\tpackedAddr := packedAddrs[0] // use first address\n\tfam := pb.CreateSocketRequest_IPv4\n\tif len(packedAddr) == net.IPv6len {\n\t\tfam = pb.CreateSocketRequest_IPv6\n\t}\n\n\treq := &pb.CreateSocketRequest{\n\t\tFamily:   fam.Enum(),\n\t\tProtocol: prot.Enum(),\n\t\tRemoteIp: &pb.AddressPort{\n\t\t\tPort:          proto.Int32(int32(port)),\n\t\t\tPackedAddress: packedAddr,\n\t\t},\n\t}\n\tif resolved {\n\t\treq.RemoteIp.HostnameHint = &host\n\t}\n\tres := &pb.CreateSocketReply{}\n\tif err := internal.Call(dialCtx, \"remote_socket\", \"CreateSocket\", req, res); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Conn{\n\t\tctx:    ctx,\n\t\tdesc:   res.GetSocketDescriptor(),\n\t\tprot:   prot,\n\t\tlocal:  res.ProxyExternalIp,\n\t\tremote: req.RemoteIp,\n\t}, nil\n}\n\n// LookupIP returns the given host's IP addresses.\nfunc LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) {\n\tpackedAddrs, _, err := resolve(ctx, ipFamilies, host)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"socket: failed resolving %q: %v\", host, err)\n\t}\n\taddrs = make([]net.IP, len(packedAddrs))\n\tfor i, pa := range packedAddrs {\n\t\taddrs[i] = net.IP(pa)\n\t}\n\treturn addrs, nil\n}\n\nfunc resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) {\n\t// Check if it's an IP address.\n\tif ip := net.ParseIP(host); ip != nil {\n\t\tif ip := ip.To4(); ip != nil {\n\t\t\treturn [][]byte{ip}, false, nil\n\t\t}\n\t\treturn [][]byte{ip}, false, nil\n\t}\n\n\treq := &pb.ResolveRequest{\n\t\tName:            &host,\n\t\tAddressFamilies: fams,\n\t}\n\tres := &pb.ResolveReply{}\n\tif err := internal.Call(ctx, \"remote_socket\", 
\"Resolve\", req, res); err != nil {\n\t\t// XXX: need to map to pb.ResolveReply_ErrorCode?\n\t\treturn nil, false, err\n\t}\n\treturn res.PackedAddress, true, nil\n}\n\n// withDeadline is like context.WithDeadline, except it ignores the zero deadline.\nfunc withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) {\n\tif deadline.IsZero() {\n\t\treturn parent, func() {}\n\t}\n\treturn context.WithDeadline(parent, deadline)\n}\n\n// Conn represents a socket connection.\n// It implements net.Conn.\ntype Conn struct {\n\tctx    context.Context\n\tdesc   string\n\toffset int64\n\n\tprot          pb.CreateSocketRequest_SocketProtocol\n\tlocal, remote *pb.AddressPort\n\n\treadDeadline, writeDeadline time.Time // optional\n}\n\n// SetContext sets the context that is used by this Conn.\n// It is usually used only when using a Conn that was created in a different context,\n// such as when a connection is created during a warmup request but used while\n// servicing a user request.\nfunc (cn *Conn) SetContext(ctx context.Context) {\n\tcn.ctx = ctx\n}\n\nfunc (cn *Conn) Read(b []byte) (n int, err error) {\n\tconst maxRead = 1 << 20\n\tif len(b) > maxRead {\n\t\tb = b[:maxRead]\n\t}\n\n\treq := &pb.ReceiveRequest{\n\t\tSocketDescriptor: &cn.desc,\n\t\tDataSize:         proto.Int32(int32(len(b))),\n\t}\n\tres := &pb.ReceiveReply{}\n\tif !cn.readDeadline.IsZero() {\n\t\treq.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds())\n\t}\n\tctx, cancel := withDeadline(cn.ctx, cn.readDeadline)\n\tdefer cancel()\n\tif err := internal.Call(ctx, \"remote_socket\", \"Receive\", req, res); err != nil {\n\t\treturn 0, err\n\t}\n\tif len(res.Data) == 0 {\n\t\treturn 0, io.EOF\n\t}\n\tif len(res.Data) > len(b) {\n\t\treturn 0, fmt.Errorf(\"socket: internal error: read too much data: %d > %d\", len(res.Data), len(b))\n\t}\n\treturn copy(b, res.Data), nil\n}\n\nfunc (cn *Conn) Write(b []byte) (n int, err error) {\n\tconst lim = 1 << 20 // 
max per chunk\n\n\tfor n < len(b) {\n\t\tchunk := b[n:]\n\t\tif len(chunk) > lim {\n\t\t\tchunk = chunk[:lim]\n\t\t}\n\n\t\treq := &pb.SendRequest{\n\t\t\tSocketDescriptor: &cn.desc,\n\t\t\tData:             chunk,\n\t\t\tStreamOffset:     &cn.offset,\n\t\t}\n\t\tres := &pb.SendReply{}\n\t\tif !cn.writeDeadline.IsZero() {\n\t\t\treq.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds())\n\t\t}\n\t\tctx, cancel := withDeadline(cn.ctx, cn.writeDeadline)\n\t\tdefer cancel()\n\t\tif err = internal.Call(ctx, \"remote_socket\", \"Send\", req, res); err != nil {\n\t\t\t// assume zero bytes were sent in this RPC\n\t\t\tbreak\n\t\t}\n\t\tn += int(res.GetDataSent())\n\t\tcn.offset += int64(res.GetDataSent())\n\t}\n\n\treturn\n}\n\nfunc (cn *Conn) Close() error {\n\treq := &pb.CloseRequest{\n\t\tSocketDescriptor: &cn.desc,\n\t}\n\tres := &pb.CloseReply{}\n\tif err := internal.Call(cn.ctx, \"remote_socket\", \"Close\", req, res); err != nil {\n\t\treturn err\n\t}\n\tcn.desc = \"CLOSED\"\n\treturn nil\n}\n\nfunc addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr {\n\tif ap == nil {\n\t\treturn nil\n\t}\n\tswitch prot {\n\tcase pb.CreateSocketRequest_TCP:\n\t\treturn &net.TCPAddr{\n\t\t\tIP:   net.IP(ap.PackedAddress),\n\t\t\tPort: int(*ap.Port),\n\t\t}\n\tcase pb.CreateSocketRequest_UDP:\n\t\treturn &net.UDPAddr{\n\t\t\tIP:   net.IP(ap.PackedAddress),\n\t\t\tPort: int(*ap.Port),\n\t\t}\n\t}\n\tpanic(\"unknown protocol \" + prot.String())\n}\n\nfunc (cn *Conn) LocalAddr() net.Addr  { return addr(cn.prot, cn.local) }\nfunc (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) }\n\nfunc (cn *Conn) SetDeadline(t time.Time) error {\n\tcn.readDeadline = t\n\tcn.writeDeadline = t\n\treturn nil\n}\n\nfunc (cn *Conn) SetReadDeadline(t time.Time) error {\n\tcn.readDeadline = t\n\treturn nil\n}\n\nfunc (cn *Conn) SetWriteDeadline(t time.Time) error {\n\tcn.writeDeadline = t\n\treturn nil\n}\n\n// KeepAlive signals that the 
connection is still in use.\n// It may be called to prevent the socket being closed due to inactivity.\nfunc (cn *Conn) KeepAlive() error {\n\treq := &pb.GetSocketNameRequest{\n\t\tSocketDescriptor: &cn.desc,\n\t}\n\tres := &pb.GetSocketNameReply{}\n\treturn internal.Call(cn.ctx, \"remote_socket\", \"GetSocketName\", req, res)\n}\n\nfunc init() {\n\tinternal.RegisterErrorCodeMap(\"remote_socket\", pb.RemoteSocketServiceError_ErrorCode_name)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/socket/socket_vm.go",
    "content": "// Copyright 2015 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// +build !appengine\n\npackage socket\n\nimport (\n\t\"net\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n)\n\n// Dial connects to the address addr on the network protocol.\n// The address format is host:port, where host may be a hostname or an IP address.\n// Known protocols are \"tcp\" and \"udp\".\n// The returned connection satisfies net.Conn, and is valid while ctx is valid;\n// if the connection is to be used after ctx becomes invalid, invoke SetContext\n// with the new context.\nfunc Dial(ctx context.Context, protocol, addr string) (*Conn, error) {\n\tconn, err := net.Dial(protocol, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Conn{conn}, nil\n}\n\n// DialTimeout is like Dial but takes a timeout.\n// The timeout includes name resolution, if required.\nfunc DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) {\n\tconn, err := net.DialTimeout(protocol, addr, timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Conn{conn}, nil\n}\n\n// LookupIP returns the given host's IP addresses.\nfunc LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) {\n\treturn net.LookupIP(host)\n}\n\n// Conn represents a socket connection.\n// It implements net.Conn.\ntype Conn struct {\n\tnet.Conn\n}\n\n// SetContext sets the context that is used by this Conn.\n// It is usually used only when using a Conn that was created in a different context,\n// such as when a connection is created during a warmup request but used while\n// servicing a user request.\nfunc (cn *Conn) SetContext(ctx context.Context) {\n\t// This function is not required on managed VMs.\n}\n\n// KeepAlive signals that the connection is still in use.\n// It may be called to prevent the socket being closed due to inactivity.\nfunc (cn *Conn) 
KeepAlive() error {\n\t// This function is not required on managed VMs.\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/taskqueue/taskqueue.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n/*\nPackage taskqueue provides a client for App Engine's taskqueue service.\nUsing this service, applications may perform work outside a user's request.\n\nA Task may be constructed manually; alternatively, since the most common\ntaskqueue operation is to add a single POST task, NewPOSTTask makes it easy.\n\n\tt := taskqueue.NewPOSTTask(\"/worker\", url.Values{\n\t\t\"key\": {key},\n\t})\n\ttaskqueue.Add(c, t, \"\") // add t to the default queue\n*/\npackage taskqueue\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"time\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/internal\"\n\tdspb \"google.golang.org/appengine/internal/datastore\"\n\tpb \"google.golang.org/appengine/internal/taskqueue\"\n)\n\nvar (\n\t// ErrTaskAlreadyAdded is the error returned by Add and AddMulti when a task has already been added with a particular name.\n\tErrTaskAlreadyAdded = errors.New(\"taskqueue: task has already been added\")\n)\n\n// RetryOptions let you control whether to retry a task and the backoff intervals between tries.\ntype RetryOptions struct {\n\t// Number of tries/leases after which the task fails permanently and is deleted.\n\t// If AgeLimit is also set, both limits must be exceeded for the task to fail permanently.\n\tRetryLimit int32\n\n\t// Maximum time allowed since the task's first try before the task fails permanently and is deleted (only for push tasks).\n\t// If RetryLimit is also set, both limits must be exceeded for the task to fail permanently.\n\tAgeLimit time.Duration\n\n\t// Minimum time between successive tries (only for push tasks).\n\tMinBackoff time.Duration\n\n\t// Maximum time between successive tries (only for push tasks).\n\tMaxBackoff 
time.Duration\n\n\t// Maximum number of times to double the interval between successive tries before the intervals increase linearly (only for push tasks).\n\tMaxDoublings int32\n\n\t// If MaxDoublings is zero, set ApplyZeroMaxDoublings to true to override the default non-zero value.\n\t// Otherwise a zero MaxDoublings is ignored and the default is used.\n\tApplyZeroMaxDoublings bool\n}\n\n// toRetryParameter converts RetryOptions to pb.TaskQueueRetryParameters.\nfunc (opt *RetryOptions) toRetryParameters() *pb.TaskQueueRetryParameters {\n\tparams := &pb.TaskQueueRetryParameters{}\n\tif opt.RetryLimit > 0 {\n\t\tparams.RetryLimit = proto.Int32(opt.RetryLimit)\n\t}\n\tif opt.AgeLimit > 0 {\n\t\tparams.AgeLimitSec = proto.Int64(int64(opt.AgeLimit.Seconds()))\n\t}\n\tif opt.MinBackoff > 0 {\n\t\tparams.MinBackoffSec = proto.Float64(opt.MinBackoff.Seconds())\n\t}\n\tif opt.MaxBackoff > 0 {\n\t\tparams.MaxBackoffSec = proto.Float64(opt.MaxBackoff.Seconds())\n\t}\n\tif opt.MaxDoublings > 0 || (opt.MaxDoublings == 0 && opt.ApplyZeroMaxDoublings) {\n\t\tparams.MaxDoublings = proto.Int32(opt.MaxDoublings)\n\t}\n\treturn params\n}\n\n// A Task represents a task to be executed.\ntype Task struct {\n\t// Path is the worker URL for the task.\n\t// If unset, it will default to /_ah/queue/<queue_name>.\n\tPath string\n\n\t// Payload is the data for the task.\n\t// This will be delivered as the HTTP request body.\n\t// It is only used when Method is POST, PUT or PULL.\n\t// url.Values' Encode method may be used to generate this for POST requests.\n\tPayload []byte\n\n\t// Additional HTTP headers to pass at the task's execution time.\n\t// To schedule the task to be run with an alternate app version\n\t// or backend, set the \"Host\" header.\n\tHeader http.Header\n\n\t// Method is the HTTP method for the task (\"GET\", \"POST\", etc.),\n\t// or \"PULL\" if this is task is destined for a pull-based queue.\n\t// If empty, this defaults to \"POST\".\n\tMethod string\n\n\t// A name for 
the task.\n\t// If empty, a name will be chosen.\n\tName string\n\n\t// Delay specifies the duration the task queue service must wait\n\t// before executing the task.\n\t// Either Delay or ETA may be set, but not both.\n\tDelay time.Duration\n\n\t// ETA specifies the earliest time a task may be executed (push queues)\n\t// or leased (pull queues).\n\t// Either Delay or ETA may be set, but not both.\n\tETA time.Time\n\n\t// The number of times the task has been dispatched or leased.\n\tRetryCount int32\n\n\t// Tag for the task. Only used when Method is PULL.\n\tTag string\n\n\t// Retry options for this task. May be nil.\n\tRetryOptions *RetryOptions\n}\n\nfunc (t *Task) method() string {\n\tif t.Method == \"\" {\n\t\treturn \"POST\"\n\t}\n\treturn t.Method\n}\n\n// NewPOSTTask creates a Task that will POST to a path with the given form data.\nfunc NewPOSTTask(path string, params url.Values) *Task {\n\th := make(http.Header)\n\th.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\treturn &Task{\n\t\tPath:    path,\n\t\tPayload: []byte(params.Encode()),\n\t\tHeader:  h,\n\t\tMethod:  \"POST\",\n\t}\n}\n\nvar (\n\tcurrentNamespace = http.CanonicalHeaderKey(\"X-AppEngine-Current-Namespace\")\n\tdefaultNamespace = http.CanonicalHeaderKey(\"X-AppEngine-Default-Namespace\")\n)\n\nfunc getDefaultNamespace(ctx context.Context) string {\n\treturn internal.IncomingHeaders(ctx).Get(defaultNamespace)\n}\n\nfunc newAddReq(c context.Context, task *Task, queueName string) (*pb.TaskQueueAddRequest, error) {\n\tif queueName == \"\" {\n\t\tqueueName = \"default\"\n\t}\n\tpath := task.Path\n\tif path == \"\" {\n\t\tpath = \"/_ah/queue/\" + queueName\n\t}\n\teta := task.ETA\n\tif eta.IsZero() {\n\t\teta = time.Now().Add(task.Delay)\n\t} else if task.Delay != 0 {\n\t\tpanic(\"taskqueue: both Delay and ETA are set\")\n\t}\n\treq := &pb.TaskQueueAddRequest{\n\t\tQueueName: []byte(queueName),\n\t\tTaskName:  []byte(task.Name),\n\t\tEtaUsec:   proto.Int64(eta.UnixNano() / 
1e3),\n\t}\n\tmethod := task.method()\n\tif method == \"PULL\" {\n\t\t// Pull-based task\n\t\treq.Body = task.Payload\n\t\treq.Mode = pb.TaskQueueMode_PULL.Enum()\n\t\tif task.Tag != \"\" {\n\t\t\treq.Tag = []byte(task.Tag)\n\t\t}\n\t} else {\n\t\t// HTTP-based task\n\t\tif v, ok := pb.TaskQueueAddRequest_RequestMethod_value[method]; ok {\n\t\t\treq.Method = pb.TaskQueueAddRequest_RequestMethod(v).Enum()\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"taskqueue: bad method %q\", method)\n\t\t}\n\t\treq.Url = []byte(path)\n\t\tfor k, vs := range task.Header {\n\t\t\tfor _, v := range vs {\n\t\t\t\treq.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{\n\t\t\t\t\tKey:   []byte(k),\n\t\t\t\t\tValue: []byte(v),\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t\tif method == \"POST\" || method == \"PUT\" {\n\t\t\treq.Body = task.Payload\n\t\t}\n\n\t\t// Namespace headers.\n\t\tif _, ok := task.Header[currentNamespace]; !ok {\n\t\t\t// Fetch the current namespace of this request.\n\t\t\tns := internal.NamespaceFromContext(c)\n\t\t\treq.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{\n\t\t\t\tKey:   []byte(currentNamespace),\n\t\t\t\tValue: []byte(ns),\n\t\t\t})\n\t\t}\n\t\tif _, ok := task.Header[defaultNamespace]; !ok {\n\t\t\t// Fetch the X-AppEngine-Default-Namespace header of this request.\n\t\t\tif ns := getDefaultNamespace(c); ns != \"\" {\n\t\t\t\treq.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{\n\t\t\t\t\tKey:   []byte(defaultNamespace),\n\t\t\t\t\tValue: []byte(ns),\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tif task.RetryOptions != nil {\n\t\treq.RetryParameters = task.RetryOptions.toRetryParameters()\n\t}\n\n\treturn req, nil\n}\n\nvar alreadyAddedErrors = map[pb.TaskQueueServiceError_ErrorCode]bool{\n\tpb.TaskQueueServiceError_TASK_ALREADY_EXISTS: true,\n\tpb.TaskQueueServiceError_TOMBSTONED_TASK:     true,\n}\n\n// Add adds the task to a named queue.\n// An empty queue name means that the default queue will be used.\n// Add returns an equivalent 
Task with defaults filled in, including setting\n// the task's Name field to the chosen name if the original was empty.\nfunc Add(c context.Context, task *Task, queueName string) (*Task, error) {\n\treq, err := newAddReq(c, task, queueName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres := &pb.TaskQueueAddResponse{}\n\tif err := internal.Call(c, \"taskqueue\", \"Add\", req, res); err != nil {\n\t\tapiErr, ok := err.(*internal.APIError)\n\t\tif ok && alreadyAddedErrors[pb.TaskQueueServiceError_ErrorCode(apiErr.Code)] {\n\t\t\treturn nil, ErrTaskAlreadyAdded\n\t\t}\n\t\treturn nil, err\n\t}\n\tresultTask := *task\n\tresultTask.Method = task.method()\n\tif task.Name == \"\" {\n\t\tresultTask.Name = string(res.ChosenTaskName)\n\t}\n\treturn &resultTask, nil\n}\n\n// AddMulti adds multiple tasks to a named queue.\n// An empty queue name means that the default queue will be used.\n// AddMulti returns a slice of equivalent tasks with defaults filled in, including setting\n// each task's Name field to the chosen name if the original was empty.\n// If a given task is badly formed or could not be added, an appengine.MultiError is returned.\nfunc AddMulti(c context.Context, tasks []*Task, queueName string) ([]*Task, error) {\n\treq := &pb.TaskQueueBulkAddRequest{\n\t\tAddRequest: make([]*pb.TaskQueueAddRequest, len(tasks)),\n\t}\n\tme, any := make(appengine.MultiError, len(tasks)), false\n\tfor i, t := range tasks {\n\t\treq.AddRequest[i], me[i] = newAddReq(c, t, queueName)\n\t\tany = any || me[i] != nil\n\t}\n\tif any {\n\t\treturn nil, me\n\t}\n\tres := &pb.TaskQueueBulkAddResponse{}\n\tif err := internal.Call(c, \"taskqueue\", \"BulkAdd\", req, res); err != nil {\n\t\treturn nil, err\n\t}\n\tif len(res.Taskresult) != len(tasks) {\n\t\treturn nil, errors.New(\"taskqueue: server error\")\n\t}\n\ttasksOut := make([]*Task, len(tasks))\n\tfor i, tr := range res.Taskresult {\n\t\ttasksOut[i] = new(Task)\n\t\t*tasksOut[i] = *tasks[i]\n\t\ttasksOut[i].Method = 
tasksOut[i].method()\n\t\tif tasksOut[i].Name == \"\" {\n\t\t\ttasksOut[i].Name = string(tr.ChosenTaskName)\n\t\t}\n\t\tif *tr.Result != pb.TaskQueueServiceError_OK {\n\t\t\tif alreadyAddedErrors[*tr.Result] {\n\t\t\t\tme[i] = ErrTaskAlreadyAdded\n\t\t\t} else {\n\t\t\t\tme[i] = &internal.APIError{\n\t\t\t\t\tService: \"taskqueue\",\n\t\t\t\t\tCode:    int32(*tr.Result),\n\t\t\t\t}\n\t\t\t}\n\t\t\tany = true\n\t\t}\n\t}\n\tif any {\n\t\treturn tasksOut, me\n\t}\n\treturn tasksOut, nil\n}\n\n// Delete deletes a task from a named queue.\nfunc Delete(c context.Context, task *Task, queueName string) error {\n\terr := DeleteMulti(c, []*Task{task}, queueName)\n\tif me, ok := err.(appengine.MultiError); ok {\n\t\treturn me[0]\n\t}\n\treturn err\n}\n\n// DeleteMulti deletes multiple tasks from a named queue.\n// If a given task could not be deleted, an appengine.MultiError is returned.\nfunc DeleteMulti(c context.Context, tasks []*Task, queueName string) error {\n\ttaskNames := make([][]byte, len(tasks))\n\tfor i, t := range tasks {\n\t\ttaskNames[i] = []byte(t.Name)\n\t}\n\tif queueName == \"\" {\n\t\tqueueName = \"default\"\n\t}\n\treq := &pb.TaskQueueDeleteRequest{\n\t\tQueueName: []byte(queueName),\n\t\tTaskName:  taskNames,\n\t}\n\tres := &pb.TaskQueueDeleteResponse{}\n\tif err := internal.Call(c, \"taskqueue\", \"Delete\", req, res); err != nil {\n\t\treturn err\n\t}\n\tif a, b := len(req.TaskName), len(res.Result); a != b {\n\t\treturn fmt.Errorf(\"taskqueue: internal error: requested deletion of %d tasks, got %d results\", a, b)\n\t}\n\tme, any := make(appengine.MultiError, len(res.Result)), false\n\tfor i, ec := range res.Result {\n\t\tif ec != pb.TaskQueueServiceError_OK {\n\t\t\tme[i] = &internal.APIError{\n\t\t\t\tService: \"taskqueue\",\n\t\t\t\tCode:    int32(ec),\n\t\t\t}\n\t\t\tany = true\n\t\t}\n\t}\n\tif any {\n\t\treturn me\n\t}\n\treturn nil\n}\n\nfunc lease(c context.Context, maxTasks int, queueName string, leaseTime int, groupByTag bool, tag []byte) 
([]*Task, error) {\n\tif queueName == \"\" {\n\t\tqueueName = \"default\"\n\t}\n\treq := &pb.TaskQueueQueryAndOwnTasksRequest{\n\t\tQueueName:    []byte(queueName),\n\t\tLeaseSeconds: proto.Float64(float64(leaseTime)),\n\t\tMaxTasks:     proto.Int64(int64(maxTasks)),\n\t\tGroupByTag:   proto.Bool(groupByTag),\n\t\tTag:          tag,\n\t}\n\tres := &pb.TaskQueueQueryAndOwnTasksResponse{}\n\tif err := internal.Call(c, \"taskqueue\", \"QueryAndOwnTasks\", req, res); err != nil {\n\t\treturn nil, err\n\t}\n\ttasks := make([]*Task, len(res.Task))\n\tfor i, t := range res.Task {\n\t\ttasks[i] = &Task{\n\t\t\tPayload:    t.Body,\n\t\t\tName:       string(t.TaskName),\n\t\t\tMethod:     \"PULL\",\n\t\t\tETA:        time.Unix(0, *t.EtaUsec*1e3),\n\t\t\tRetryCount: *t.RetryCount,\n\t\t\tTag:        string(t.Tag),\n\t\t}\n\t}\n\treturn tasks, nil\n}\n\n// Lease leases tasks from a queue.\n// leaseTime is in seconds.\n// The number of tasks fetched will be at most maxTasks.\nfunc Lease(c context.Context, maxTasks int, queueName string, leaseTime int) ([]*Task, error) {\n\treturn lease(c, maxTasks, queueName, leaseTime, false, nil)\n}\n\n// LeaseByTag leases tasks from a queue, grouped by tag.\n// If tag is empty, then the returned tasks are grouped by the tag of the task with earliest ETA.\n// leaseTime is in seconds.\n// The number of tasks fetched will be at most maxTasks.\nfunc LeaseByTag(c context.Context, maxTasks int, queueName string, leaseTime int, tag string) ([]*Task, error) {\n\treturn lease(c, maxTasks, queueName, leaseTime, true, []byte(tag))\n}\n\n// Purge removes all tasks from a queue.\nfunc Purge(c context.Context, queueName string) error {\n\tif queueName == \"\" {\n\t\tqueueName = \"default\"\n\t}\n\treq := &pb.TaskQueuePurgeQueueRequest{\n\t\tQueueName: []byte(queueName),\n\t}\n\tres := &pb.TaskQueuePurgeQueueResponse{}\n\treturn internal.Call(c, \"taskqueue\", \"PurgeQueue\", req, res)\n}\n\n// ModifyLease modifies the lease of a task.\n// Used to request 
more processing time, or to abandon processing.\n// leaseTime is in seconds and must not be negative.\nfunc ModifyLease(c context.Context, task *Task, queueName string, leaseTime int) error {\n\tif queueName == \"\" {\n\t\tqueueName = \"default\"\n\t}\n\treq := &pb.TaskQueueModifyTaskLeaseRequest{\n\t\tQueueName:    []byte(queueName),\n\t\tTaskName:     []byte(task.Name),\n\t\tEtaUsec:      proto.Int64(task.ETA.UnixNano() / 1e3), // Used to verify ownership.\n\t\tLeaseSeconds: proto.Float64(float64(leaseTime)),\n\t}\n\tres := &pb.TaskQueueModifyTaskLeaseResponse{}\n\tif err := internal.Call(c, \"taskqueue\", \"ModifyTaskLease\", req, res); err != nil {\n\t\treturn err\n\t}\n\ttask.ETA = time.Unix(0, *res.UpdatedEtaUsec*1e3)\n\treturn nil\n}\n\n// QueueStatistics represents statistics about a single task queue.\ntype QueueStatistics struct {\n\tTasks     int       // may be an approximation\n\tOldestETA time.Time // zero if there are no pending tasks\n\n\tExecuted1Minute int     // tasks executed in the last minute\n\tInFlight        int     // tasks executing now\n\tEnforcedRate    float64 // requests per second\n}\n\n// QueueStats retrieves statistics about queues.\nfunc QueueStats(c context.Context, queueNames []string) ([]QueueStatistics, error) {\n\treq := &pb.TaskQueueFetchQueueStatsRequest{\n\t\tQueueName: make([][]byte, len(queueNames)),\n\t}\n\tfor i, q := range queueNames {\n\t\tif q == \"\" {\n\t\t\tq = \"default\"\n\t\t}\n\t\treq.QueueName[i] = []byte(q)\n\t}\n\tres := &pb.TaskQueueFetchQueueStatsResponse{}\n\tif err := internal.Call(c, \"taskqueue\", \"FetchQueueStats\", req, res); err != nil {\n\t\treturn nil, err\n\t}\n\tqs := make([]QueueStatistics, len(res.Queuestats))\n\tfor i, qsg := range res.Queuestats {\n\t\tqs[i] = QueueStatistics{\n\t\t\tTasks: int(*qsg.NumTasks),\n\t\t}\n\t\tif eta := *qsg.OldestEtaUsec; eta > -1 {\n\t\t\tqs[i].OldestETA = time.Unix(0, eta*1e3)\n\t\t}\n\t\tif si := qsg.ScannerInfo; si != nil {\n\t\t\tqs[i].Executed1Minute = 
int(*si.ExecutedLastMinute)\n\t\t\tqs[i].InFlight = int(si.GetRequestsInFlight())\n\t\t\tqs[i].EnforcedRate = si.GetEnforcedRate()\n\t\t}\n\t}\n\treturn qs, nil\n}\n\nfunc setTransaction(x *pb.TaskQueueAddRequest, t *dspb.Transaction) {\n\tx.Transaction = t\n}\n\nfunc init() {\n\tinternal.RegisterErrorCodeMap(\"taskqueue\", pb.TaskQueueServiceError_ErrorCode_name)\n\n\t// Datastore error codes are shifted by DATASTORE_ERROR when presented through taskqueue.\n\tdsCode := int32(pb.TaskQueueServiceError_DATASTORE_ERROR) + int32(dspb.Error_TIMEOUT)\n\tinternal.RegisterTimeoutErrorCode(\"taskqueue\", dsCode)\n\n\t// Transaction registration.\n\tinternal.RegisterTransactionSetter(setTransaction)\n\tinternal.RegisterTransactionSetter(func(x *pb.TaskQueueBulkAddRequest, t *dspb.Transaction) {\n\t\tfor _, req := range x.AddRequest {\n\t\t\tsetTransaction(req, t)\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/timeout.go",
    "content": "// Copyright 2013 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage appengine\n\nimport \"golang.org/x/net/context\"\n\n// IsTimeoutError reports whether err is a timeout error.\nfunc IsTimeoutError(err error) bool {\n\tif err == context.DeadlineExceeded {\n\t\treturn true\n\t}\n\tif t, ok := err.(interface {\n\t\tIsTimeout() bool\n\t}); ok {\n\t\treturn t.IsTimeout()\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/urlfetch/urlfetch.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// Package urlfetch provides an http.RoundTripper implementation\n// for fetching URLs via App Engine's urlfetch service.\npackage urlfetch\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/urlfetch\"\n)\n\n// Transport is an implementation of http.RoundTripper for\n// App Engine. Users should generally create an http.Client using\n// this transport and use the Client rather than using this transport\n// directly.\ntype Transport struct {\n\tContext context.Context\n\n\t// Controls whether the application checks the validity of SSL certificates\n\t// over HTTPS connections. A value of false (the default) instructs the\n\t// application to send a request to the server only if the certificate is\n\t// valid and signed by a trusted certificate authority (CA), and also\n\t// includes a hostname that matches the certificate. A value of true\n\t// instructs the application to perform no certificate validation.\n\tAllowInvalidServerCertificate bool\n}\n\n// Verify statically that *Transport implements http.RoundTripper.\nvar _ http.RoundTripper = (*Transport)(nil)\n\n// Client returns an *http.Client using a default urlfetch Transport. 
This\n// client will have the default deadline of 5 seconds, and will check the\n// validity of SSL certificates.\n//\n// Any deadline of the provided context will be used for requests through this client;\n// if the client does not have a deadline then a 5 second default is used.\nfunc Client(ctx context.Context) *http.Client {\n\treturn &http.Client{\n\t\tTransport: &Transport{\n\t\t\tContext: ctx,\n\t\t},\n\t}\n}\n\ntype bodyReader struct {\n\tcontent   []byte\n\ttruncated bool\n\tclosed    bool\n}\n\n// ErrTruncatedBody is the error returned after the final Read() from a\n// response's Body if the body has been truncated by App Engine's proxy.\nvar ErrTruncatedBody = errors.New(\"urlfetch: truncated body\")\n\nfunc statusCodeToText(code int) string {\n\tif t := http.StatusText(code); t != \"\" {\n\t\treturn t\n\t}\n\treturn strconv.Itoa(code)\n}\n\nfunc (br *bodyReader) Read(p []byte) (n int, err error) {\n\tif br.closed {\n\t\tif br.truncated {\n\t\t\treturn 0, ErrTruncatedBody\n\t\t}\n\t\treturn 0, io.EOF\n\t}\n\tn = copy(p, br.content)\n\tif n > 0 {\n\t\tbr.content = br.content[n:]\n\t\treturn\n\t}\n\tif br.truncated {\n\t\tbr.closed = true\n\t\treturn 0, ErrTruncatedBody\n\t}\n\treturn 0, io.EOF\n}\n\nfunc (br *bodyReader) Close() error {\n\tbr.closed = true\n\tbr.content = nil\n\treturn nil\n}\n\n// A map of the URL Fetch-accepted methods that take a request body.\nvar methodAcceptsRequestBody = map[string]bool{\n\t\"POST\":  true,\n\t\"PUT\":   true,\n\t\"PATCH\": true,\n}\n\n// urlString returns a valid string given a URL. 
This function is necessary because\n// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.\n// See http://code.google.com/p/go/issues/detail?id=4860.\nfunc urlString(u *url.URL) string {\n\tif u.Opaque == \"\" || strings.HasPrefix(u.Opaque, \"//\") {\n\t\treturn u.String()\n\t}\n\taux := *u\n\taux.Opaque = \"//\" + aux.Host + aux.Opaque\n\treturn aux.String()\n}\n\n// RoundTrip issues a single HTTP request and returns its response. Per the\n// http.RoundTripper interface, RoundTrip only returns an error if there\n// was an unsupported request or the URL Fetch proxy fails.\n// Note that HTTP response codes such as 5xx, 403, 404, etc are not\n// errors as far as the transport is concerned and will be returned\n// with err set to nil.\nfunc (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {\n\tmethNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"urlfetch: unsupported HTTP method %q\", req.Method)\n\t}\n\n\tmethod := pb.URLFetchRequest_RequestMethod(methNum)\n\n\tfreq := &pb.URLFetchRequest{\n\t\tMethod:                        &method,\n\t\tUrl:                           proto.String(urlString(req.URL)),\n\t\tFollowRedirects:               proto.Bool(false), // http.Client's responsibility\n\t\tMustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),\n\t}\n\tif deadline, ok := t.Context.Deadline(); ok {\n\t\tfreq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())\n\t}\n\n\tfor k, vals := range req.Header {\n\t\tfor _, val := range vals {\n\t\t\tfreq.Header = append(freq.Header, &pb.URLFetchRequest_Header{\n\t\t\t\tKey:   proto.String(k),\n\t\t\t\tValue: proto.String(val),\n\t\t\t})\n\t\t}\n\t}\n\tif methodAcceptsRequestBody[req.Method] && req.Body != nil {\n\t\t// Avoid a []byte copy if req.Body has a Bytes method.\n\t\tswitch b := req.Body.(type) {\n\t\tcase interface {\n\t\t\tBytes() []byte\n\t\t}:\n\t\t\tfreq.Payload = 
b.Bytes()\n\t\tdefault:\n\t\t\tfreq.Payload, err = ioutil.ReadAll(req.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tfres := &pb.URLFetchResponse{}\n\tif err := internal.Call(t.Context, \"urlfetch\", \"Fetch\", freq, fres); err != nil {\n\t\treturn nil, err\n\t}\n\n\tres = &http.Response{}\n\tres.StatusCode = int(*fres.StatusCode)\n\tres.Status = fmt.Sprintf(\"%d %s\", res.StatusCode, statusCodeToText(res.StatusCode))\n\tres.Header = make(http.Header)\n\tres.Request = req\n\n\t// Faked:\n\tres.ProtoMajor = 1\n\tres.ProtoMinor = 1\n\tres.Proto = \"HTTP/1.1\"\n\tres.Close = true\n\n\tfor _, h := range fres.Header {\n\t\thkey := http.CanonicalHeaderKey(*h.Key)\n\t\thval := *h.Value\n\t\tif hkey == \"Content-Length\" {\n\t\t\t// Will get filled in below for all but HEAD requests.\n\t\t\tif req.Method == \"HEAD\" {\n\t\t\t\tres.ContentLength, _ = strconv.ParseInt(hval, 10, 64)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tres.Header.Add(hkey, hval)\n\t}\n\n\tif req.Method != \"HEAD\" {\n\t\tres.ContentLength = int64(len(fres.Content))\n\t}\n\n\ttruncated := fres.GetContentWasTruncated()\n\tres.Body = &bodyReader{content: fres.Content, truncated: truncated}\n\treturn\n}\n\nfunc init() {\n\tinternal.RegisterErrorCodeMap(\"urlfetch\", pb.URLFetchServiceError_ErrorCode_name)\n\tinternal.RegisterTimeoutErrorCode(\"urlfetch\", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/user/oauth.go",
    "content": "// Copyright 2012 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\npackage user\n\nimport (\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/user\"\n)\n\n// CurrentOAuth returns the user associated with the OAuth consumer making this\n// request. If the OAuth consumer did not make a valid OAuth request, or the\n// scopes is non-empty and the current user does not have at least one of the\n// scopes, this method will return an error.\nfunc CurrentOAuth(c context.Context, scopes ...string) (*User, error) {\n\treq := &pb.GetOAuthUserRequest{}\n\tif len(scopes) != 1 || scopes[0] != \"\" {\n\t\t// The signature for this function used to be CurrentOAuth(Context, string).\n\t\t// Ignore the singular \"\" scope to preserve existing behavior.\n\t\treq.Scopes = scopes\n\t}\n\n\tres := &pb.GetOAuthUserResponse{}\n\n\terr := internal.Call(c, \"user\", \"GetOAuthUser\", req, res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &User{\n\t\tEmail:      *res.Email,\n\t\tAuthDomain: *res.AuthDomain,\n\t\tAdmin:      res.GetIsAdmin(),\n\t\tID:         *res.UserId,\n\t\tClientID:   res.GetClientId(),\n\t}, nil\n}\n\n// OAuthConsumerKey returns the OAuth consumer key provided with the current\n// request. This method will return an error if the OAuth request was invalid.\nfunc OAuthConsumerKey(c context.Context) (string, error) {\n\treq := &pb.CheckOAuthSignatureRequest{}\n\tres := &pb.CheckOAuthSignatureResponse{}\n\n\terr := internal.Call(c, \"user\", \"CheckOAuthSignature\", req, res)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn *res.OauthConsumerKey, err\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/user/user.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// Package user provides a client for App Engine's user authentication service.\npackage user\n\nimport (\n\t\"strings\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/user\"\n)\n\n// User represents a user of the application.\ntype User struct {\n\tEmail      string\n\tAuthDomain string\n\tAdmin      bool\n\n\t// ID is the unique permanent ID of the user.\n\t// It is populated if the Email is associated\n\t// with a Google account, or empty otherwise.\n\tID string\n\n\t// ClientID is the ID of the pre-registered client so its identity can be verified.\n\t// See https://developers.google.com/console/help/#generatingoauth2 for more information.\n\tClientID string\n\n\tFederatedIdentity string\n\tFederatedProvider string\n}\n\n// String returns a displayable name for the user.\nfunc (u *User) String() string {\n\tif u.AuthDomain != \"\" && strings.HasSuffix(u.Email, \"@\"+u.AuthDomain) {\n\t\treturn u.Email[:len(u.Email)-len(\"@\"+u.AuthDomain)]\n\t}\n\tif u.FederatedIdentity != \"\" {\n\t\treturn u.FederatedIdentity\n\t}\n\treturn u.Email\n}\n\n// LoginURL returns a URL that, when visited, prompts the user to sign in,\n// then redirects the user to the URL specified by dest.\nfunc LoginURL(c context.Context, dest string) (string, error) {\n\treturn LoginURLFederated(c, dest, \"\")\n}\n\n// LoginURLFederated is like LoginURL but accepts a user's OpenID identifier.\nfunc LoginURLFederated(c context.Context, dest, identity string) (string, error) {\n\treq := &pb.CreateLoginURLRequest{\n\t\tDestinationUrl: proto.String(dest),\n\t}\n\tif identity != \"\" {\n\t\treq.FederatedIdentity = proto.String(identity)\n\t}\n\tres := &pb.CreateLoginURLResponse{}\n\tif err := internal.Call(c, 
\"user\", \"CreateLoginURL\", req, res); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn *res.LoginUrl, nil\n}\n\n// LogoutURL returns a URL that, when visited, signs the user out,\n// then redirects the user to the URL specified by dest.\nfunc LogoutURL(c context.Context, dest string) (string, error) {\n\treq := &pb.CreateLogoutURLRequest{\n\t\tDestinationUrl: proto.String(dest),\n\t}\n\tres := &pb.CreateLogoutURLResponse{}\n\tif err := internal.Call(c, \"user\", \"CreateLogoutURL\", req, res); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn *res.LogoutUrl, nil\n}\n\nfunc init() {\n\tinternal.RegisterErrorCodeMap(\"user\", pb.UserServiceError_ErrorCode_name)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/user/user_classic.go",
    "content": "// Copyright 2015 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// +build appengine\n\npackage user\n\nimport (\n\t\"appengine/user\"\n\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n)\n\nfunc Current(ctx context.Context) *User {\n\tu := user.Current(internal.ClassicContextFromContext(ctx))\n\tif u == nil {\n\t\treturn nil\n\t}\n\t// Map appengine/user.User to this package's User type.\n\treturn &User{\n\t\tEmail:             u.Email,\n\t\tAuthDomain:        u.AuthDomain,\n\t\tAdmin:             u.Admin,\n\t\tID:                u.ID,\n\t\tFederatedIdentity: u.FederatedIdentity,\n\t\tFederatedProvider: u.FederatedProvider,\n\t}\n}\n\nfunc IsAdmin(ctx context.Context) bool {\n\treturn user.IsAdmin(internal.ClassicContextFromContext(ctx))\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/user/user_vm.go",
    "content": "// Copyright 2014 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n// +build !appengine\n\npackage user\n\nimport (\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine/internal\"\n)\n\n// Current returns the currently logged-in user,\n// or nil if the user is not signed in.\nfunc Current(c context.Context) *User {\n\th := internal.IncomingHeaders(c)\n\tu := &User{\n\t\tEmail:             h.Get(\"X-AppEngine-User-Email\"),\n\t\tAuthDomain:        h.Get(\"X-AppEngine-Auth-Domain\"),\n\t\tID:                h.Get(\"X-AppEngine-User-Id\"),\n\t\tAdmin:             h.Get(\"X-AppEngine-User-Is-Admin\") == \"1\",\n\t\tFederatedIdentity: h.Get(\"X-AppEngine-Federated-Identity\"),\n\t\tFederatedProvider: h.Get(\"X-AppEngine-Federated-Provider\"),\n\t}\n\tif u.Email == \"\" && u.FederatedIdentity == \"\" {\n\t\treturn nil\n\t}\n\treturn u\n}\n\n// IsAdmin returns true if the current user is signed in and\n// is currently registered as an administrator of the application.\nfunc IsAdmin(c context.Context) bool {\n\th := internal.IncomingHeaders(c)\n\treturn h.Get(\"X-AppEngine-User-Is-Admin\") == \"1\"\n}\n"
  },
  {
    "path": "vendor/google.golang.org/appengine/xmpp/xmpp.go",
    "content": "// Copyright 2011 Google Inc. All rights reserved.\n// Use of this source code is governed by the Apache 2.0\n// license that can be found in the LICENSE file.\n\n/*\nPackage xmpp provides the means to send and receive instant messages\nto and from users of XMPP-compatible services.\n\nTo send a message,\n\tm := &xmpp.Message{\n\t\tTo:   []string{\"kaylee@example.com\"},\n\t\tBody: `Hi! How's the carrot?`,\n\t}\n\terr := m.Send(c)\n\nTo receive messages,\n\tfunc init() {\n\t\txmpp.Handle(handleChat)\n\t}\n\n\tfunc handleChat(c context.Context, m *xmpp.Message) {\n\t\t// ...\n\t}\n*/\npackage xmpp\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"golang.org/x/net/context\"\n\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/internal\"\n\tpb \"google.golang.org/appengine/internal/xmpp\"\n)\n\n// Message represents an incoming chat message.\ntype Message struct {\n\t// Sender is the JID of the sender.\n\t// Optional for outgoing messages.\n\tSender string\n\n\t// To is the intended recipients of the message.\n\t// Incoming messages will have exactly one element.\n\tTo []string\n\n\t// Body is the body of the message.\n\tBody string\n\n\t// Type is the message type, per RFC 3921.\n\t// It defaults to \"chat\".\n\tType string\n\n\t// RawXML is whether the body contains raw XML.\n\tRawXML bool\n}\n\n// Presence represents an outgoing presence update.\ntype Presence struct {\n\t// Sender is the JID (optional).\n\tSender string\n\n\t// The intended recipient of the presence update.\n\tTo string\n\n\t// Type, per RFC 3921 (optional). 
Defaults to \"available\".\n\tType string\n\n\t// State of presence (optional).\n\t// Valid values: \"away\", \"chat\", \"xa\", \"dnd\" (RFC 3921).\n\tState string\n\n\t// Free text status message (optional).\n\tStatus string\n}\n\nvar (\n\tErrPresenceUnavailable = errors.New(\"xmpp: presence unavailable\")\n\tErrInvalidJID          = errors.New(\"xmpp: invalid JID\")\n)\n\n// Handle arranges for f to be called for incoming XMPP messages.\n// Only messages of type \"chat\" or \"normal\" will be handled.\nfunc Handle(f func(c context.Context, m *Message)) {\n\thttp.HandleFunc(\"/_ah/xmpp/message/chat/\", func(_ http.ResponseWriter, r *http.Request) {\n\t\tf(appengine.NewContext(r), &Message{\n\t\t\tSender: r.FormValue(\"from\"),\n\t\t\tTo:     []string{r.FormValue(\"to\")},\n\t\t\tBody:   r.FormValue(\"body\"),\n\t\t})\n\t})\n}\n\n// Send sends a message.\n// If any failures occur with specific recipients, the error will be an appengine.MultiError.\nfunc (m *Message) Send(c context.Context) error {\n\treq := &pb.XmppMessageRequest{\n\t\tJid:    m.To,\n\t\tBody:   &m.Body,\n\t\tRawXml: &m.RawXML,\n\t}\n\tif m.Type != \"\" && m.Type != \"chat\" {\n\t\treq.Type = &m.Type\n\t}\n\tif m.Sender != \"\" {\n\t\treq.FromJid = &m.Sender\n\t}\n\tres := &pb.XmppMessageResponse{}\n\tif err := internal.Call(c, \"xmpp\", \"SendMessage\", req, res); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res.Status) != len(req.Jid) {\n\t\treturn fmt.Errorf(\"xmpp: sent message to %d JIDs, but only got %d statuses back\", len(req.Jid), len(res.Status))\n\t}\n\tme, any := make(appengine.MultiError, len(req.Jid)), false\n\tfor i, st := range res.Status {\n\t\tif st != pb.XmppMessageResponse_NO_ERROR {\n\t\t\tme[i] = errors.New(st.String())\n\t\t\tany = true\n\t\t}\n\t}\n\tif any {\n\t\treturn me\n\t}\n\treturn nil\n}\n\n// Invite sends an invitation. 
If the from address is an empty string\n// the default (yourapp@appspot.com/bot) will be used.\nfunc Invite(c context.Context, to, from string) error {\n\treq := &pb.XmppInviteRequest{\n\t\tJid: &to,\n\t}\n\tif from != \"\" {\n\t\treq.FromJid = &from\n\t}\n\tres := &pb.XmppInviteResponse{}\n\treturn internal.Call(c, \"xmpp\", \"SendInvite\", req, res)\n}\n\n// Send sends a presence update.\nfunc (p *Presence) Send(c context.Context) error {\n\treq := &pb.XmppSendPresenceRequest{\n\t\tJid: &p.To,\n\t}\n\tif p.State != \"\" {\n\t\treq.Show = &p.State\n\t}\n\tif p.Type != \"\" {\n\t\treq.Type = &p.Type\n\t}\n\tif p.Sender != \"\" {\n\t\treq.FromJid = &p.Sender\n\t}\n\tif p.Status != \"\" {\n\t\treq.Status = &p.Status\n\t}\n\tres := &pb.XmppSendPresenceResponse{}\n\treturn internal.Call(c, \"xmpp\", \"SendPresence\", req, res)\n}\n\nvar presenceMap = map[pb.PresenceResponse_SHOW]string{\n\tpb.PresenceResponse_NORMAL:         \"\",\n\tpb.PresenceResponse_AWAY:           \"away\",\n\tpb.PresenceResponse_DO_NOT_DISTURB: \"dnd\",\n\tpb.PresenceResponse_CHAT:           \"chat\",\n\tpb.PresenceResponse_EXTENDED_AWAY:  \"xa\",\n}\n\n// GetPresence retrieves a user's presence.\n// If the from address is an empty string the default\n// (yourapp@appspot.com/bot) will be used.\n// Possible return values are \"\", \"away\", \"dnd\", \"chat\", \"xa\".\n// ErrPresenceUnavailable is returned if the presence is unavailable.\nfunc GetPresence(c context.Context, to string, from string) (string, error) {\n\treq := &pb.PresenceRequest{\n\t\tJid: &to,\n\t}\n\tif from != \"\" {\n\t\treq.FromJid = &from\n\t}\n\tres := &pb.PresenceResponse{}\n\tif err := internal.Call(c, \"xmpp\", \"GetPresence\", req, res); err != nil {\n\t\treturn \"\", err\n\t}\n\tif !*res.IsAvailable || res.Presence == nil {\n\t\treturn \"\", ErrPresenceUnavailable\n\t}\n\tpresence, ok := presenceMap[*res.Presence]\n\tif ok {\n\t\treturn presence, nil\n\t}\n\treturn \"\", fmt.Errorf(\"xmpp: unknown presence %v\", 
*res.Presence)\n}\n\n// GetPresenceMulti retrieves multiple users' presence.\n// If the from address is an empty string the default\n// (yourapp@appspot.com/bot) will be used.\n// Possible return values are \"\", \"away\", \"dnd\", \"chat\", \"xa\".\n// If any presence is unavailable, an appengine.MultiError is returned\nfunc GetPresenceMulti(c context.Context, to []string, from string) ([]string, error) {\n\treq := &pb.BulkPresenceRequest{\n\t\tJid: to,\n\t}\n\tif from != \"\" {\n\t\treq.FromJid = &from\n\t}\n\tres := &pb.BulkPresenceResponse{}\n\n\tif err := internal.Call(c, \"xmpp\", \"BulkGetPresence\", req, res); err != nil {\n\t\treturn nil, err\n\t}\n\n\tpresences := make([]string, 0, len(res.PresenceResponse))\n\terrs := appengine.MultiError{}\n\n\taddResult := func(presence string, err error) {\n\t\tpresences = append(presences, presence)\n\t\terrs = append(errs, err)\n\t}\n\n\tanyErr := false\n\tfor _, subres := range res.PresenceResponse {\n\t\tif !subres.GetValid() {\n\t\t\tanyErr = true\n\t\t\taddResult(\"\", ErrInvalidJID)\n\t\t\tcontinue\n\t\t}\n\t\tif !*subres.IsAvailable || subres.Presence == nil {\n\t\t\tanyErr = true\n\t\t\taddResult(\"\", ErrPresenceUnavailable)\n\t\t\tcontinue\n\t\t}\n\t\tpresence, ok := presenceMap[*subres.Presence]\n\t\tif ok {\n\t\t\taddResult(presence, nil)\n\t\t} else {\n\t\t\tanyErr = true\n\t\t\taddResult(\"\", fmt.Errorf(\"xmpp: unknown presence %q\", *subres.Presence))\n\t\t}\n\t}\n\tif anyErr {\n\t\treturn presences, errs\n\t}\n\treturn presences, nil\n}\n\nfunc init() {\n\tinternal.RegisterErrorCodeMap(\"xmpp\", pb.XmppServiceError_ErrorCode_name)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/.travis.yml",
    "content": "language: go\ngo:\n- 1.4\n- tip\ninstall:\n- go get -v google.golang.org/cloud/...\nscript:\n- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d\n- GCLOUD_TESTS_GOLANG_PROJECT_ID=\"dulcet-port-762\" GCLOUD_TESTS_GOLANG_KEY=\"$(pwd)/key.json\"\n  go test -v -tags=integration google.golang.org/cloud/...\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/AUTHORS",
    "content": "# This is the official list of cloud authors for copyright purposes.\n# This file is distinct from the CONTRIBUTORS files.\n# See the latter for an explanation.\n\n# Names should be added to this file as:\n# Name or Organization <email address>\n# The email address is not required for organizations.\n\nGoogle Inc.\nPalm Stone Games, Inc.\nPéter Szilágyi <peterke@gmail.com>\nTyler Treat <ttreat31@gmail.com>\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/CONTRIBUTING.md",
    "content": "# Contributing\n\n1. Sign one of the contributor license agreements below.\n1. `go get golang.org/x/review/git-review` to install the code reviewing tool.\n1. Get the cloud package by running `go get -d google.golang.org/cloud`.\nIf you've already got the package, make sure that the remote git origin\nis https://code.googlesource.com/gocloud.\n`git remote set-url origin https://code.googlesource.com/gocloud`\n1. Make changes and create a change by running `review change <name>`,\nprovide a command message, and use `review mail` to create a Gerrit CL.\n1. Keep amending to the change and mail as your recieve feedback.\n\n## Integration Tests\n\nAdditional to the unit tests, you may run the integration test suite.\n\nTo run the integrations tests, creating and configuration of a project in the\nGoogle Developers Console is required. Once you create a project, set the\nfollowing environment variables to be able to run the against the actual APIs.\n\n- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. 
bamboo-shift-455)\n- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.\n\nCreate a storage bucket with the same name as the project id set in **GCLOUD_TESTS_GOLANG_PROJECT_ID**.\nThe storage integration test will create and delete some objects in this bucket.\n\nInstall the [gcloud command-line tool][gcloudcli] to your machine and use it\nto create the indexes used in the datastore integration tests with indexes\nfound in `datastore/testdata/index.yaml`:\n\nFrom the project's root directory:\n\n``` sh\n# Install the app component\n$ gcloud components update app\n\n# Set the default project in your env\n$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID\n\n# Authenticate the gcloud tool with your account\n$ gcloud auth login\n\n# Create the indexes\n$ gcloud preview datastore create-indexes datastore/testdata\n\n```\n\nYou can run the integration tests by running:\n\n``` sh\n$ go test -v -tags=integration google.golang.org/cloud/...\n```\n\n## Contributor License Agreements\n\nBefore we can accept your pull requests you'll need to sign a Contributor\nLicense Agreement (CLA):\n\n- **If you are an individual writing original source code** and **you own the\n- intellectual property**, then you'll need to sign an [individual CLA][indvcla].\n- **If you work for a company that wants to allow you to contribute your work**,\nthen you'll need to sign a [corporate CLA][corpcla].\n\nYou can sign these electronically (just scroll to the bottom). After that,\nwe'll be able to accept your pull requests.\n\n[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/\n[indvcla]: https://developers.google.com/open-source/cla/individual\n[corpcla]: https://developers.google.com/open-source/cla/corporate\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/CONTRIBUTORS",
    "content": "# People who have agreed to one of the CLAs and can contribute patches.\n# The AUTHORS file lists the copyright holders; this file\n# lists people.  For example, Google employees are listed here\n# but not in AUTHORS, because Google holds the copyright.\n#\n# https://developers.google.com/open-source/cla/individual\n# https://developers.google.com/open-source/cla/corporate\n#\n# Names should be added to this file as:\n#     Name <email address>\n\n# Keep the list alphabetically sorted.\n\nAndrew Gerrand <adg@golang.org>\nBrad Fitzpatrick <bradfitz@golang.org>\nBurcu Dogan <jbd@google.com>\nDave Day <djd@golang.org>\nDavid Symonds <dsymonds@golang.org>\nGlenn Lewis <gmlewis@google.com>\nJohan Euphrosine <proppy@google.com>\nLuna Duclos <luna.duclos@palmstonegames.com>\nMichael McGreevy <mcgreevy@golang.org>\nPéter Szilágyi <peterke@gmail.com>\nTyler Treat <ttreat31@gmail.com>\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/LICENSE",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright 2014 Google Inc.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/README.md",
    "content": "# Google Cloud for Go\n\n[![Build Status](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang)\n\n**NOTE:** These packages are experimental, and may occasionally make\nbackwards-incompatible changes.\n\n**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).\n\nGo packages for Google Cloud Platform services. Supported APIs include:\n\n * Google Cloud Datastore\n * Google Cloud Storage\n * Google Cloud Pub/Sub\n * Google Cloud Container Engine\n\n``` go\nimport \"google.golang.org/cloud\"\n```\n\nDocumentation and examples are available at\n[https://godoc.org/google.golang.org/cloud](https://godoc.org/google.golang.org/cloud).\n\n## Authorization\n\nAuthorization, throughout the package, is delegated to the godoc.org/golang.org/x/oauth2.\nRefer to the [godoc documentation](https://godoc.org/golang.org/x/oauth2)\nfor examples on using oauth2 with the Cloud package.\n\n## Google Cloud Datastore\n\n[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully\nmanaged, schemaless database for storing non-relational data. 
Cloud Datastore\nautomatically scales with your users and supports ACID transactions, high availability\nof reads and writes, strong consistency for reads and ancestor queries, and eventual\nconsistency for all other queries.\n\nFollow the [activation instructions][cloud-datastore-activation] to use the Google\nCloud Datastore API with your project.\n\n[https://godoc.org/google.golang.org/cloud/datastore](https://godoc.org/google.golang.org/cloud/datastore)\n\n\n```go\ntype Post struct {\n\tTitle       string\n\tBody        string `datastore:\",noindex\"`\n\tPublishedAt time.Time\n}\nkeys := []*datastore.Key{\n\tdatastore.NewKey(ctx, \"Post\", \"post1\", 0, nil),\n\tdatastore.NewKey(ctx, \"Post\", \"post2\", 0, nil),\n}\nposts := []*Post{\n\t{Title: \"Post 1\", Body: \"...\", PublishedAt: time.Now()},\n\t{Title: \"Post 2\", Body: \"...\", PublishedAt: time.Now()},\n}\nif _, err := datastore.PutMulti(ctx, keys, posts); err != nil {\n\tlog.Println(err)\n}\n```\n\n## Google Cloud Storage\n\n[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store\ndata on Google infrastructure with very high reliability, performance and availability,\nand can be used to distribute large data objects to users via direct download.\n\n[https://godoc.org/google.golang.org/cloud/storage](https://godoc.org/google.golang.org/cloud/storage)\n\n\n```go\n// Read the object1 from bucket.\nrc, err := storage.NewReader(ctx, \"bucket\", \"object1\")\nif err != nil {\n\tlog.Fatal(err)\n}\nslurp, err := ioutil.ReadAll(rc)\nrc.Close()\nif err != nil {\n\tlog.Fatal(err)\n}\n```\n\n## Google Cloud Pub/Sub (Alpha)\n\n> Google Cloud Pub/Sub is in **Alpha status**. As a result, it might change in\n> backward-incompatible ways and is not recommended for production use. 
It is not\n> subject to any SLA or deprecation policy.\n\n[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect\nyour services with reliable, many-to-many, asynchronous messaging hosted on Google's\ninfrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation\nfor building your own robust, global services.\n\n[https://godoc.org/google.golang.org/cloud/pubsub](https://godoc.org/google.golang.org/cloud/pubsub)\n\n\n```go\n// Publish \"hello world\" on topic1.\nmsgIDs, err := pubsub.Publish(ctx, \"topic1\", &pubsub.Message{\n\tData: []byte(\"hello world\"),\n})\nif err != nil {\n\tlog.Println(err)\n}\n// Pull messages via subscription1.\nmsgs, err := pubsub.Pull(ctx, \"subscription1\", 1)\nif err != nil {\n\tlog.Println(err)\n}\n```\n\n## Contributing\n\nContributions are welcome. Please, see the\n[CONTRIBUTING](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md)\ndocument for details. We're using Gerrit for our code reviews. Please don't open pull\nrequests against this repo, new pull requests will be automatically closed.\n\n[cloud-datastore]: https://cloud.google.com/datastore/\n[cloud-datastore-docs]: https://cloud.google.com/datastore/docs\n[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate\n\n[cloud-pubsub]: https://cloud.google.com/pubsub/\n[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs\n\n[cloud-storage]: https://cloud.google.com/storage/\n[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview\n[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/bigquery.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bigquery\n\n// TODO(mcgreevy): support dry-run mode when creating jobs.\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"golang.org/x/net/context\"\n\tbq \"google.golang.org/api/bigquery/v2\"\n)\n\n// A Source is a source of data for the Copy function.\ntype Source interface {\n\timplementsSource()\n}\n\n// A Destination is a destination of data for the Copy function.\ntype Destination interface {\n\timplementsDestination()\n}\n\n// An Option is an optional argument to Copy.\ntype Option interface {\n\timplementsOption()\n}\n\n// A ReadSource is a source of data for the Read function.\ntype ReadSource interface {\n\timplementsReadSource()\n}\n\n// A ReadOption is an optional argument to Read.\ntype ReadOption interface {\n\tcustomizeRead(conf *pagingConf)\n}\n\nconst Scope = \"https://www.googleapis.com/auth/bigquery\"\n\n// Client may be used to perform BigQuery operations.\ntype Client struct {\n\tservice   service\n\tprojectID string\n}\n\n// Note: many of the methods on *Client appear in the various *_op.go source files.\n\n// NewClient constructs a new Client which can perform BigQuery operations.\n// Operations performed via the client are billed to the specified GCP project.\n// The supplied http.Client is used for making requests to the BigQuery server and must be capable of\n// authenticating requests with Scope.\nfunc 
NewClient(client *http.Client, projectID string) (*Client, error) {\n\tbqService, err := newBigqueryService(client)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"constructing bigquery client: %v\", err)\n\t}\n\n\tc := &Client{\n\t\tservice:   bqService,\n\t\tprojectID: projectID,\n\t}\n\treturn c, nil\n}\n\n// initJobProto creates and returns a bigquery Job proto.\n// The proto is customized using any jobOptions in options.\n// The list of Options is returned with the jobOptions removed.\nfunc initJobProto(projectID string, options []Option) (*bq.Job, []Option) {\n\tjob := &bq.Job{}\n\n\tvar other []Option\n\tfor _, opt := range options {\n\t\tif o, ok := opt.(jobOption); ok {\n\t\t\to.customizeJob(job, projectID)\n\t\t} else {\n\t\t\tother = append(other, opt)\n\t\t}\n\t}\n\treturn job, other\n}\n\n// Copy starts a BigQuery operation to copy data from a Source to a Destination.\nfunc (c *Client) Copy(ctx context.Context, dst Destination, src Source, options ...Option) (*Job, error) {\n\tswitch dst := dst.(type) {\n\tcase *Table:\n\t\tswitch src := src.(type) {\n\t\tcase *GCSReference:\n\t\t\treturn c.load(ctx, dst, src, options)\n\t\tcase *Table:\n\t\t\treturn c.cp(ctx, dst, Tables{src}, options)\n\t\tcase Tables:\n\t\t\treturn c.cp(ctx, dst, src, options)\n\t\tcase *Query:\n\t\t\treturn c.query(ctx, dst, src, options)\n\t\t}\n\tcase *GCSReference:\n\t\tif src, ok := src.(*Table); ok {\n\t\t\treturn c.extract(ctx, dst, src, options)\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"no Copy operation matches dst/src pair: dst: %T ; src: %T\", dst, src)\n}\n\n// Read fetches data from a Source and returns the data via an Iterator.\nfunc (c *Client) Read(ctx context.Context, src ReadSource, options ...ReadOption) (*Iterator, error) {\n\t// TODO(mcgreevy): support Query as a ReadSource.\n\t// TODO(mcgreevy): use ctx.\n\tswitch src := src.(type) {\n\tcase *Table:\n\t\treturn c.readTable(src, options)\n\t}\n\treturn nil, fmt.Errorf(\"src (%T) does not support the Read 
operation\", src)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/copy_op.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bigquery\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org/x/net/context\"\n\tbq \"google.golang.org/api/bigquery/v2\"\n)\n\ntype copyOption interface {\n\tcustomizeCopy(conf *bq.JobConfigurationTableCopy, projectID string)\n}\n\nfunc (c *Client) cp(ctx context.Context, dst *Table, src Tables, options []Option) (*Job, error) {\n\tjob, options := initJobProto(c.projectID, options)\n\tpayload := &bq.JobConfigurationTableCopy{}\n\n\tdst.customizeCopyDst(payload, c.projectID)\n\tsrc.customizeCopySrc(payload, c.projectID)\n\n\tfor _, opt := range options {\n\t\to, ok := opt.(copyOption)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"option (%#v) not applicable to dst/src pair: dst: %T ; src: %T\", opt, dst, src)\n\t\t}\n\t\to.customizeCopy(payload, c.projectID)\n\t}\n\n\tjob.Configuration = &bq.JobConfiguration{\n\t\tCopy: payload,\n\t}\n\treturn c.service.insertJob(ctx, job, c.projectID)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/doc.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package bigquery provides a client for the BigQuery service.\n//\n// Note: This package is a work-in-progress.  Backwards-incompatible changes should be expected.\npackage bigquery\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/error.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bigquery\n\nimport (\n\t\"fmt\"\n\n\tbq \"google.golang.org/api/bigquery/v2\"\n)\n\n// An Error contains detailed information about an error encountered while processing a job.\ntype Error struct {\n\t// Mirrors bq.ErrorProto, but drops DebugInfo\n\tLocation, Message, Reason string\n}\n\nfunc (e Error) Error() string {\n\treturn fmt.Sprintf(\"{Location: %q; Message: %q; Reason: %q}\", e.Location, e.Message, e.Reason)\n}\n\nfunc errorFromErrorProto(ep *bq.ErrorProto) *Error {\n\tif ep == nil {\n\t\treturn nil\n\t}\n\treturn &Error{\n\t\tLocation: ep.Location,\n\t\tMessage:  ep.Message,\n\t\tReason:   ep.Reason,\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/extract_op.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bigquery\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org/x/net/context\"\n\tbq \"google.golang.org/api/bigquery/v2\"\n)\n\ntype extractOption interface {\n\tcustomizeExtract(conf *bq.JobConfigurationExtract, projectID string)\n}\n\n// DisableHeader returns an Option that disables the printing of a header row in exported data.\nfunc DisableHeader() Option { return disableHeader{} }\n\ntype disableHeader struct{}\n\nfunc (opt disableHeader) implementsOption() {}\n\nfunc (opt disableHeader) customizeExtract(conf *bq.JobConfigurationExtract, projectID string) {\n\tconf.PrintHeader = false\n}\n\nfunc (c *Client) extract(ctx context.Context, dst *GCSReference, src *Table, options []Option) (*Job, error) {\n\tjob, options := initJobProto(c.projectID, options)\n\tpayload := &bq.JobConfigurationExtract{}\n\n\tdst.customizeExtractDst(payload, c.projectID)\n\tsrc.customizeExtractSrc(payload, c.projectID)\n\n\tfor _, opt := range options {\n\t\to, ok := opt.(extractOption)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"option (%#v) not applicable to dst/src pair: dst: %T ; src: %T\", opt, dst, src)\n\t\t}\n\t\to.customizeExtract(payload, c.projectID)\n\t}\n\n\tjob.Configuration = &bq.JobConfiguration{\n\t\tExtract: payload,\n\t}\n\treturn c.service.insertJob(ctx, job, c.projectID)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/gcs.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bigquery\n\nimport bq \"google.golang.org/api/bigquery/v2\"\n\n// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute\n// an input or output to a BigQuery operation.\ntype GCSReference struct {\n\turis []string\n\n\t// FieldDelimiter is the separator for fields in a CSV file, used when loading or exporting data.\n\t// The default is \",\".\n\tFieldDelimiter string\n\n\t// The number of rows at the top of a CSV file that BigQuery will skip when loading the data.\n\tSkipLeadingRows int64\n\n\t// SourceFormat is the format of the GCS data to be loaded into BigQuery.\n\t// Allowed values are: CSV, JSON, DatastoreBackup.  The default is CSV.\n\tSourceFormat DataFormat\n\t// Only used when loading data.\n\tEncoding Encoding\n\n\t// Quote is the value used to quote data sections in a CSV file.\n\t// The default quotation character is the double quote (\"), which is used if both Quote and ForceZeroQuote are unset.\n\t// To specify that no character should be interpreted as a quotation character, set ForceZeroQuote to true.\n\t// Only used when loading data.\n\tQuote          string\n\tForceZeroQuote bool\n\n\t// DestinationFormat is the format to use when writing exported files.\n\t// Allowed values are: CSV, Avro, JSON.  
The default is CSV.\n\t// CSV is not supported for tables with nested or repeated fields.\n\tDestinationFormat DataFormat\n\t// Only used when writing data.  Default is None.\n\tCompression Compression\n}\n\nfunc (gcs *GCSReference) implementsSource()      {}\nfunc (gcs *GCSReference) implementsDestination() {}\n\n// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.\n// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.\n// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided.\n// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.\n// For more information about the treatment of wildcards and multiple URIs,\n// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple\nfunc (c *Client) NewGCSReference(uri ...string) *GCSReference {\n\treturn &GCSReference{uris: uri}\n}\n\ntype DataFormat string\n\nconst (\n\tCSV             DataFormat = \"CSV\"\n\tAvro            DataFormat = \"AVRO\"\n\tJSON            DataFormat = \"NEWLINE_DELIMITED_JSON\"\n\tDatastoreBackup DataFormat = \"DATASTORE_BACKUP\"\n)\n\n// Encoding specifies the character encoding of data to be loaded into BigQuery.\n// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding\n// for more details about how this is used.\ntype Encoding string\n\nconst (\n\tUTF_8      Encoding = \"UTF-8\"\n\tISO_8859_1 Encoding = \"ISO-8859-1\"\n)\n\n// Compression is the type of compression to apply when writing data to Google Cloud Storage.\ntype Compression string\n\nconst (\n\tNone Compression = \"NONE\"\n\tGzip Compression = \"GZIP\"\n)\n\nfunc (gcs *GCSReference) customizeLoadSrc(conf *bq.JobConfigurationLoad, projectID string) {\n\tconf.SourceUris = gcs.uris\n\tconf.SkipLeadingRows = gcs.SkipLeadingRows\n\tconf.SourceFormat 
= string(gcs.SourceFormat)\n\tconf.Encoding = string(gcs.Encoding)\n\tconf.FieldDelimiter = gcs.FieldDelimiter\n\n\t// TODO(mcgreevy): take into account gcs.ForceZeroQuote once the underlying library supports it.\n\tconf.Quote = gcs.Quote\n}\n\nfunc (gcs *GCSReference) customizeExtractDst(conf *bq.JobConfigurationExtract, projectID string) {\n\tconf.DestinationUris = gcs.uris\n\tconf.Compression = string(gcs.Compression)\n\tconf.DestinationFormat = string(gcs.DestinationFormat)\n\tconf.FieldDelimiter = gcs.FieldDelimiter\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/iterator.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bigquery\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org/x/net/context\"\n)\n\n// Iterator provides access to the result of a BigQuery lookup.\n// Next must be called before the first call to Get.\ntype Iterator struct {\n\ts service\n\n\t// conf contains the information necessary to make the next readTabledata call.\n\t// conf is set to nil when there is no more data to be fetched from the server.\n\tconf *readTabledataConf\n\trs   [][]Value // contains prefetched rows. 
The first element is returned by Get.\n\terr  error     // contains any error encountered during calls to Next.\n}\n\n// Next advances the Iterator to the next row, making that row available\n// via the Get method.\n// Next must be called before the first call to Get.\n// Next returns false when there are no more rows available, either because\n// the end of the output was reached, or because there was an error (consult\n// the Err method to determine which).\nfunc (it *Iterator) Next(ctx context.Context) bool {\n\tif it.err != nil {\n\t\treturn false\n\t}\n\n\tif len(it.rs) > 0 {\n\t\tit.rs = it.rs[1:]\n\t}\n\n\tif len(it.rs) == 0 {\n\t\tit.fetchRows(ctx)\n\t}\n\n\treturn it.hasCurrentRow()\n}\n\nfunc (it *Iterator) hasCurrentRow() bool {\n\treturn it.err == nil && len(it.rs) != 0\n}\n\n// fetchRows fetches a series of rows from the BigQuery service.\n// The fetched rows will be returned via subsequent calls to Get.\nfunc (it *Iterator) fetchRows(ctx context.Context) {\n\tif it.conf == nil {\n\t\treturn\n\t}\n\t// TODO(mcgreevy): refactor to support reads of query results.\n\tres, err := it.s.readTabledata(ctx, it.conf)\n\tif err != nil {\n\t\tit.err = err\n\t\treturn\n\t}\n\tif res.pageToken == \"\" {\n\t\t// No more data.\n\t\tit.conf = nil\n\t} else {\n\t\tit.conf.paging.pageToken = res.pageToken\n\t}\n\tit.rs = res.rows\n}\n\n// Err returns the last error encountered by Next, or nil for no error.\nfunc (it *Iterator) Err() error {\n\treturn it.err\n}\n\n// Get loads the current row into dst, which must implement ValueLoader.\nfunc (it *Iterator) Get(dst interface{}) error {\n\tif !it.hasCurrentRow() {\n\t\treturn fmt.Errorf(\"Get called on iterator with no remaining values\")\n\t}\n\n\tif dst, ok := dst.(ValueLoader); ok {\n\t\treturn dst.Load(it.rs[0])\n\t}\n\treturn fmt.Errorf(\"Get called with unsupported argument type\")\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/job.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bigquery\n\nimport (\n\t\"golang.org/x/net/context\"\n\tbq \"google.golang.org/api/bigquery/v2\"\n)\n\n// A Job represents an operation which has been submitted to BigQuery for processing.\ntype Job struct {\n\tservice   service\n\tprojectID string\n\tjobID     string\n}\n\n// State is one of a sequence of states that a Job progresses through as it is processed.\ntype State int\n\nconst (\n\tPending State = iota\n\tRunning\n\tDone\n)\n\n// JobStatus contains the current State of a job, and errors encountered while processing that job.\ntype JobStatus struct {\n\tState State\n\n\terr error\n\n\t// All errors encountered during the running of the job.\n\t// Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful.\n\tErrors []*Error\n}\n\n// jobOption is an Option which modifies a bq.Job proto.\n// This is used for configuring values that apply to all operations, such as setting a jobReference.\ntype jobOption interface {\n\tcustomizeJob(job *bq.Job, projectID string)\n}\n\ntype jobID string\n\n// JobID returns an Option that sets the job ID of a BigQuery job.\n// If this Option is not used, a job ID is generated automatically.\nfunc JobID(ID string) Option {\n\treturn jobID(ID)\n}\n\nfunc (opt jobID) implementsOption() {}\n\nfunc (opt jobID) customizeJob(job *bq.Job, 
projectID string) {\n\tjob.JobReference = &bq.JobReference{\n\t\tJobId:     string(opt),\n\t\tProjectId: projectID,\n\t}\n}\n\n// Done reports whether the job has completed.\n// After Done returns true, the Err method will return an error if the job completed unsuccessfully.\nfunc (s *JobStatus) Done() bool {\n\treturn s.State == Done\n}\n\n// Err returns the error that caused the job to complete unsuccessfully (if any).\nfunc (s *JobStatus) Err() error {\n\treturn s.err\n}\n\n// Status returns the current status of the job.  It fails if the Status could not be determined.\nfunc (j *Job) Status(ctx context.Context) (*JobStatus, error) {\n\treturn j.service.jobStatus(ctx, j.projectID, j.jobID)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/load_op.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bigquery\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org/x/net/context\"\n\tbq \"google.golang.org/api/bigquery/v2\"\n)\n\ntype loadOption interface {\n\tcustomizeLoad(conf *bq.JobConfigurationLoad, projectID string)\n}\n\n// A DestinationSchema must be supplied when loading data from Google Cloud Storage into a non-existent table.\n// Caveat: DestinationSchema is not required if the data being loaded is a datastore backup.\nfunc DestinationSchema(schema Schema) Option { return destSchema(schema) }\n\ntype destSchema Schema\n\nfunc (opt destSchema) implementsOption() {}\n\nfunc (opt destSchema) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) {\n\tvar fields []*bq.TableFieldSchema\n\tfor _, f := range opt {\n\t\tfields = append(fields, f.proto())\n\t}\n\tif len(fields) > 0 {\n\t\tconf.Schema = &bq.TableSchema{Fields: fields}\n\t}\n}\n\n// MaxBadRecords returns an Option that sets the maximum number of bad records that will be ignored.\n// If this maximum is exceeded, the operation will be unsuccessful.\nfunc MaxBadRecords(n int64) Option { return maxBadRecords(n) }\n\ntype maxBadRecords int64\n\nfunc (opt maxBadRecords) implementsOption() {}\n\nfunc (opt maxBadRecords) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) {\n\tconf.MaxBadRecords = int64(opt)\n}\n\n// AllowJaggedRows returns an Option that 
causes missing trailing optional columns to be tolerated in CSV data.  Missing values are treated as nulls.\nfunc AllowJaggedRows() Option { return allowJaggedRows{} }\n\ntype allowJaggedRows struct{}\n\nfunc (opt allowJaggedRows) implementsOption() {}\n\nfunc (opt allowJaggedRows) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) {\n\tconf.AllowJaggedRows = true\n}\n\n// AllowQuotedNewlines returns an Option that allows quoted data sections containing newlines in CSV data.\nfunc AllowQuotedNewlines() Option { return allowQuotedNewlines{} }\n\ntype allowQuotedNewlines struct{}\n\nfunc (opt allowQuotedNewlines) implementsOption() {}\n\nfunc (opt allowQuotedNewlines) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) {\n\tconf.AllowQuotedNewlines = true\n}\n\n// IgnoreUnknownValues returns an Option that causes values not matching the schema to be tolerated.\n// Unknown values are ignored. For CSV this ignores extra values at the end of a line.\n// For JSON this ignores named values that do not match any column name.\n// If this Option is not used, records containing unknown values are treated as bad records.\n// The MaxBadRecords Option can be used to customize how bad records are handled.\nfunc IgnoreUnknownValues() Option { return ignoreUnknownValues{} }\n\ntype ignoreUnknownValues struct{}\n\nfunc (opt ignoreUnknownValues) implementsOption() {}\n\nfunc (opt ignoreUnknownValues) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) {\n\tconf.IgnoreUnknownValues = true\n}\n\nfunc (c *Client) load(ctx context.Context, dst *Table, src *GCSReference, options []Option) (*Job, error) {\n\tjob, options := initJobProto(c.projectID, options)\n\tpayload := &bq.JobConfigurationLoad{}\n\n\tdst.customizeLoadDst(payload, c.projectID)\n\tsrc.customizeLoadSrc(payload, c.projectID)\n\n\tfor _, opt := range options {\n\t\to, ok := opt.(loadOption)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"option (%#v) not applicable to dst/src pair: dst: %T ; 
src: %T\", opt, dst, src)\n\t\t}\n\t\to.customizeLoad(payload, c.projectID)\n\t}\n\n\tjob.Configuration = &bq.JobConfiguration{\n\t\tLoad: payload,\n\t}\n\treturn c.service.insertJob(ctx, job, c.projectID)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/query.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bigquery\n\nimport bq \"google.golang.org/api/bigquery/v2\"\n\n// Query represents a query to be executed.\ntype Query struct {\n\t// The query to execute. See https://cloud.google.com/bigquery/query-reference for details.\n\tQ string\n\n\t// DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query.\n\t// If DefaultProjectID is set, DefaultDatasetID must also be set.\n\tDefaultProjectID string\n\tDefaultDatasetID string\n}\n\nfunc (q *Query) implementsSource() {}\n\nfunc (q *Query) customizeQuerySrc(conf *bq.JobConfigurationQuery, projectID string) {\n\tconf.Query = q.Q\n\tif q.DefaultProjectID != \"\" || q.DefaultDatasetID != \"\" {\n\t\tconf.DefaultDataset = &bq.DatasetReference{\n\t\t\tDatasetId: q.DefaultDatasetID,\n\t\t\tProjectId: q.DefaultProjectID,\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/query_op.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bigquery\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org/x/net/context\"\n\tbq \"google.golang.org/api/bigquery/v2\"\n)\n\ntype queryOption interface {\n\tcustomizeQuery(conf *bq.JobConfigurationQuery, projectID string)\n}\n\n// UseQueryCache returns an Option that causes results to be fetched from the query cache if they are available.\n// The query cache is a best-effort cache that is flushed whenever tables in the query are modified.\n// Cached results are only available when TableID is unspecified in the query's destination Table.\n// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching\nfunc UseQueryCache() Option { return useQueryCache{} }\n\ntype useQueryCache struct{}\n\nfunc (opt useQueryCache) implementsOption() {}\n\nfunc (opt useQueryCache) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) {\n\tconf.UseQueryCache = true\n}\n\n// JobPriority returns an Option that causes a query to be scheduled with the specified priority.\n// The default priority is InteractivePriority.\n// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries\nfunc JobPriority(priority string) Option { return jobPriority(priority) }\n\ntype jobPriority string\n\nfunc (opt jobPriority) implementsOption() {}\n\nfunc (opt jobPriority) customizeQuery(conf 
*bq.JobConfigurationQuery, projectID string) {\n\tconf.Priority = string(opt)\n}\n\nconst (\n\tBatchPriority       = \"BATCH\"\n\tInteractivePriority = \"INTERACTIVE\"\n)\n\n// TODO(mcgreevy): support large results.\n// TODO(mcgreevy): support non-flattened results.\n\nfunc (c *Client) query(ctx context.Context, dst *Table, src *Query, options []Option) (*Job, error) {\n\tjob, options := initJobProto(c.projectID, options)\n\tpayload := &bq.JobConfigurationQuery{}\n\n\tdst.customizeQueryDst(payload, c.projectID)\n\tsrc.customizeQuerySrc(payload, c.projectID)\n\n\tfor _, opt := range options {\n\t\to, ok := opt.(queryOption)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"option (%#v) not applicable to dst/src pair: dst: %T ; src: %T\", opt, dst, src)\n\t\t}\n\t\to.customizeQuery(payload, c.projectID)\n\t}\n\n\tjob.Configuration = &bq.JobConfiguration{\n\t\tQuery: payload,\n\t}\n\treturn c.service.insertJob(ctx, job, c.projectID)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/read_op.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bigquery\n\n// RecordsPerRequest returns a ReadOption that sets the number of records to fetch per request when streaming data from BigQuery.\nfunc RecordsPerRequest(n int64) ReadOption { return recordsPerRequest(n) }\n\ntype recordsPerRequest int64\n\nfunc (opt recordsPerRequest) customizeRead(conf *pagingConf) {\n\tconf.recordsPerRequest = int64(opt)\n\tconf.setRecordsPerRequest = true\n}\n\n// TODO(mcgreevy): support configurable startIndex and pageToken.\nfunc (c *Client) readTable(src *Table, options []ReadOption) (*Iterator, error) {\n\tconf := &readTabledataConf{}\n\tsrc.customizeReadSrc(conf)\n\n\tfor _, o := range options {\n\t\to.customizeRead(&conf.paging)\n\t}\n\n\t// The iterator takes care of actually fetching the data.\n\tit := &Iterator{\n\t\tconf: conf,\n\t\ts:    c.service,\n\t}\n\treturn it, nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/schema.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bigquery\n\nimport bq \"google.golang.org/api/bigquery/v2\"\n\n// Schema describes the fields in a table or query result.\ntype Schema []*FieldSchema\n\n// TODO(mcgreevy): add a function to generate a schema from a struct.\n\ntype FieldSchema struct {\n\t// The field name.\n\t// Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_),\n\t// and must start with a letter or underscore.\n\t// The maximum length is 128 characters.\n\tName string\n\n\t// A description of the field. The maximum length is 16,384 characters.\n\tDescription string\n\n\t// Whether the field may contain multiple values.\n\tRepeated bool\n\t// Whether the field is required.  Ignored if Repeated is true.\n\tRequired bool\n\n\t// The field data type.  
If Type is Record, then this field contains a nested schema,\n\t// which is described by Schema.\n\tType FieldType\n\t// Describes the nested schema if Type is set to Record.\n\tSchema Schema\n}\n\nfunc (fs *FieldSchema) proto() *bq.TableFieldSchema {\n\ttfs := &bq.TableFieldSchema{\n\t\tDescription: fs.Description,\n\t\tName:        fs.Name,\n\t\tType:        string(fs.Type),\n\t}\n\n\tif fs.Repeated {\n\t\ttfs.Mode = \"REPEATED\"\n\t} else if fs.Required {\n\t\ttfs.Mode = \"REQUIRED\"\n\t} // else leave as default, which is interpreted as NULLABLE.\n\n\tfor _, f := range fs.Schema {\n\t\ttfs.Fields = append(tfs.Fields, f.proto())\n\t}\n\n\treturn tfs\n}\n\ntype FieldType string\n\nconst (\n\tStringFieldType    FieldType = \"STRING\"\n\tIntegerFieldType   FieldType = \"INTEGER\"\n\tFloatFieldType     FieldType = \"FLOAT\"\n\tBooleanFieldType   FieldType = \"BOOLEAN\"\n\tTimestampFieldType FieldType = \"TIMESTAMP\"\n\tRecordFieldType    FieldType = \"RECORD\"\n)\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/service.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bigquery\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"golang.org/x/net/context\"\n\tbq \"google.golang.org/api/bigquery/v2\"\n)\n\n// service provides an internal abstraction to isolate the generated\n// BigQuery API; most of this package uses this interface instead.\n// The single implementation, *bigqueryService, contains all the knowledge\n// of the generated BigQuery API.\ntype service interface {\n\tinsertJob(ctx context.Context, job *bq.Job, projectId string) (*Job, error)\n\tjobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error)\n\treadTabledata(ctx context.Context, conf *readTabledataConf) (*readTabledataResult, error)\n}\n\ntype bigqueryService struct {\n\ts *bq.Service\n}\n\nfunc newBigqueryService(client *http.Client) (*bigqueryService, error) {\n\ts, err := bq.New(client)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"constructing bigquery client: %v\", err)\n\t}\n\n\treturn &bigqueryService{s: s}, nil\n}\n\nfunc (s *bigqueryService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) {\n\t// TODO(mcgreevy): use ctx\n\tres, err := s.s.Jobs.Insert(projectID, job).Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Job{service: s, projectID: projectID, jobID: res.JobReference.JobId}, nil\n}\n\ntype pagingConf struct {\n\tpageToken 
string\n\n\trecordsPerRequest    int64\n\tsetRecordsPerRequest bool\n}\n\ntype readTabledataConf struct {\n\tprojectID, datasetID, tableID string\n\tpaging                        pagingConf\n}\n\ntype readTabledataResult struct {\n\tpageToken string\n\trows      [][]Value\n\ttotalRows int64\n}\n\nfunc (s *bigqueryService) readTabledata(ctx context.Context, conf *readTabledataConf) (*readTabledataResult, error) {\n\tlist := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID).\n\t\tPageToken(conf.paging.pageToken)\n\n\tif conf.paging.setRecordsPerRequest {\n\t\tlist = list.MaxResults(conf.paging.recordsPerRequest)\n\t}\n\n\tres, err := list.Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar rs [][]Value\n\tfor _, r := range res.Rows {\n\t\trs = append(rs, convertRow(r))\n\t}\n\n\tresult := &readTabledataResult{\n\t\tpageToken: res.PageToken,\n\t\trows:      rs,\n\t\ttotalRows: res.TotalRows,\n\t}\n\treturn result, nil\n}\n\nfunc convertRow(r *bq.TableRow) []Value {\n\tvar values []Value\n\tfor _, cell := range r.F {\n\t\tvalues = append(values, cell.V)\n\t}\n\treturn values\n}\n\nfunc (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {\n\t// TODO(mcgreevy): use ctx\n\tres, err := s.s.Jobs.Get(projectID, jobID).Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn jobStatusFromProto(res.Status)\n}\n\nvar stateMap = map[string]State{\"PENDING\": Pending, \"RUNNING\": Running, \"DONE\": Done}\n\nfunc jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) {\n\tstate, ok := stateMap[status.State]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unexpected job state: %v\", status.State)\n\t}\n\n\tnewStatus := &JobStatus{\n\t\tState: state,\n\t\terr:   nil,\n\t}\n\tif err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil {\n\t\tnewStatus.err = err\n\t}\n\n\tfor _, ep := range status.Errors {\n\t\tnewStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep))\n\t}\n\treturn 
newStatus, nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/table.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bigquery\n\nimport bq \"google.golang.org/api/bigquery/v2\"\n\n// A Table is a reference to a BigQuery table.\ntype Table struct {\n\t// ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.\n\t// In this case the result will be stored in an ephemeral table.\n\tProjectID string\n\tDatasetID string\n\t// TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).\n\t// The maximum length is 1,024 characters.\n\tTableID string\n\n\t// All following fields are optional.\n\tCreateDisposition CreateDisposition // default is CreateIfNeeded.\n\tWriteDisposition  WriteDisposition  // default is WriteAppend.\n}\n\n// Tables is a group of tables. The tables may belong to differing projects or datasets.\ntype Tables []*Table\n\n// CreateDisposition specifies the circumstances under which destination table will be created.\ntype CreateDisposition string\n\nconst (\n\t// The table will be created if it does not already exist.  
Tables are created atomically on successful completion of a job.\n\tCreateIfNeeded CreateDisposition = \"CREATE_IF_NEEDED\"\n\n\t// The table must already exist and will not be automatically created.\n\tCreateNever CreateDisposition = \"CREATE_NEVER\"\n)\n\n// WriteDisposition specifies how existing data in a destination table is treated.\ntype WriteDisposition string\n\nconst (\n\t// Data will be appended to any existing data in the destination table.\n\t// Data is appended atomically on successful completion of a job.\n\tWriteAppend WriteDisposition = \"WRITE_APPEND\"\n\n\t// Existing data in the destination table will be overwritten.\n\t// Data is overwritten atomically on successful completion of a job.\n\tWriteTruncate WriteDisposition = \"WRITE_TRUNCATE\"\n\n\t// Writes will fail if the destination table already contains data.\n\tWriteEmpty WriteDisposition = \"WRITE_EMPTY\"\n)\n\nfunc (t *Table) implementsSource()      {}\nfunc (t *Table) implementsReadSource()  {}\nfunc (t *Table) implementsDestination() {}\nfunc (ts Tables) implementsSource()     {}\n\nfunc (t *Table) tableRefProto() *bq.TableReference {\n\treturn &bq.TableReference{\n\t\tProjectId: t.ProjectID,\n\t\tDatasetId: t.DatasetID,\n\t\tTableId:   t.TableID,\n\t}\n}\n\n// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID.\nfunc (t *Table) implicitTable() bool {\n\treturn t.ProjectID == \"\" && t.DatasetID == \"\" && t.TableID == \"\"\n}\n\nfunc (t *Table) customizeLoadDst(conf *bq.JobConfigurationLoad, projectID string) {\n\tconf.DestinationTable = t.tableRefProto()\n\tconf.CreateDisposition = string(t.CreateDisposition)\n\tconf.WriteDisposition = string(t.WriteDisposition)\n}\n\nfunc (t *Table) customizeExtractSrc(conf *bq.JobConfigurationExtract, projectID string) {\n\tconf.SourceTable = t.tableRefProto()\n}\n\nfunc (t *Table) customizeCopyDst(conf *bq.JobConfigurationTableCopy, projectID string) 
{\n\tconf.DestinationTable = t.tableRefProto()\n\tconf.CreateDisposition = string(t.CreateDisposition)\n\tconf.WriteDisposition = string(t.WriteDisposition)\n}\n\nfunc (ts Tables) customizeCopySrc(conf *bq.JobConfigurationTableCopy, projectID string) {\n\tfor _, t := range ts {\n\t\tconf.SourceTables = append(conf.SourceTables, t.tableRefProto())\n\t}\n}\n\nfunc (t *Table) customizeQueryDst(conf *bq.JobConfigurationQuery, projectID string) {\n\tif !t.implicitTable() {\n\t\tconf.DestinationTable = t.tableRefProto()\n\t}\n\tconf.CreateDisposition = string(t.CreateDisposition)\n\tconf.WriteDisposition = string(t.WriteDisposition)\n}\n\nfunc (t *Table) customizeReadSrc(conf *readTabledataConf) {\n\tconf.projectID = t.ProjectID\n\tconf.datasetID = t.DatasetID\n\tconf.tableID = t.TableID\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigquery/value.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bigquery\n\n// Value stores the contents of a single cell from a BigQuery result.\ntype Value interface{}\n\n// ValueLoader stores a slice of Values representing a result row from a Read operation.\n// See Iterator.Get for more information.\ntype ValueLoader interface {\n\tLoad(v []Value) error\n}\n\n// ValueList converts a []Value to implement ValueLoader.\ntype ValueList []Value\n\n// Load stores a sequence of values in a ValueList.\nfunc (vs *ValueList) Load(v []Value) error {\n\t*vs = append(*vs, v...)\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/admin.go",
    "content": "/*\nCopyright 2015 Google Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage bigtable\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/cloud\"\n\tbtcspb \"google.golang.org/cloud/bigtable/internal/cluster_service_proto\"\n\tbttspb \"google.golang.org/cloud/bigtable/internal/table_service_proto\"\n\t\"google.golang.org/grpc\"\n)\n\nconst adminAddr = \"bigtabletableadmin.googleapis.com:443\"\n\n// AdminClient is a client type for performing admin operations on a specific cluster.\ntype AdminClient struct {\n\tconn    *grpc.ClientConn\n\ttClient bttspb.BigtableTableServiceClient\n\tcClient btcspb.BigtableClusterServiceClient\n\n\tproject, zone, cluster string\n}\n\n// NewAdminClient creates a new AdminClient for a given project, zone and cluster.\nfunc NewAdminClient(ctx context.Context, project, zone, cluster string, opts ...cloud.ClientOption) (*AdminClient, error) {\n\to := []cloud.ClientOption{\n\t\tcloud.WithEndpoint(adminAddr),\n\t\tcloud.WithScopes(AdminScope),\n\t}\n\to = append(o, opts...)\n\tconn, err := cloud.DialGRPC(ctx, o...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"dialing: %v\", err)\n\t}\n\treturn &AdminClient{\n\t\tconn:    conn,\n\t\ttClient: bttspb.NewBigtableTableServiceClient(conn),\n\t\tcClient: btcspb.NewBigtableClusterServiceClient(conn),\n\n\t\tproject: project,\n\t\tzone:    zone,\n\t\tcluster: cluster,\n\t}, nil\n}\n\n// Close closes the 
AdminClient.\nfunc (ac *AdminClient) Close() {\n\tac.conn.Close()\n}\n\nfunc (ac *AdminClient) clusterPrefix() string {\n\treturn fmt.Sprintf(\"projects/%s/zones/%s/clusters/%s\", ac.project, ac.zone, ac.cluster)\n}\n\n// Tables returns a list of the tables in the cluster.\nfunc (ac *AdminClient) Tables(ctx context.Context) ([]string, error) {\n\tprefix := ac.clusterPrefix()\n\treq := &bttspb.ListTablesRequest{\n\t\tName: prefix,\n\t}\n\tres, err := ac.tClient.ListTables(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnames := make([]string, 0, len(res.Tables))\n\tfor _, tbl := range res.Tables {\n\t\tnames = append(names, strings.TrimPrefix(tbl.Name, prefix+\"/tables/\"))\n\t}\n\treturn names, nil\n}\n\n// CreateTable creates a new table in the cluster.\n// This method may return before the table's creation is complete.\nfunc (ac *AdminClient) CreateTable(ctx context.Context, table string) error {\n\tprefix := ac.clusterPrefix()\n\treq := &bttspb.CreateTableRequest{\n\t\tName:    prefix,\n\t\tTableId: table,\n\t}\n\t_, err := ac.tClient.CreateTable(ctx, req)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// CreateColumnFamily creates a new column family in a table.\nfunc (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error {\n\t// TODO(dsymonds): Permit specifying gcexpr and any other family settings.\n\tprefix := ac.clusterPrefix()\n\treq := &bttspb.CreateColumnFamilyRequest{\n\t\tName:           prefix + \"/tables/\" + table,\n\t\tColumnFamilyId: family,\n\t}\n\t_, err := ac.tClient.CreateColumnFamily(ctx, req)\n\treturn err\n}\n\n// DeleteTable deletes a table and all of its data.\nfunc (ac *AdminClient) DeleteTable(ctx context.Context, table string) error {\n\tprefix := ac.clusterPrefix()\n\treq := &bttspb.DeleteTableRequest{\n\t\tName: prefix + \"/tables/\" + table,\n\t}\n\t_, err := ac.tClient.DeleteTable(ctx, req)\n\treturn err\n}\n\n// DeleteColumnFamily deletes a column family in a table and all of 
its data.\nfunc (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error {\n\tprefix := ac.clusterPrefix()\n\treq := &bttspb.DeleteColumnFamilyRequest{\n\t\tName: prefix + \"/tables/\" + table + \"/columnFamilies/\" + family,\n\t}\n\t_, err := ac.tClient.DeleteColumnFamily(ctx, req)\n\treturn err\n}\n\n// TableInfo represents information about a table.\ntype TableInfo struct {\n\tFamilies []string\n}\n\n// TableInfo retrieves information about a table.\nfunc (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) {\n\tprefix := ac.clusterPrefix()\n\treq := &bttspb.GetTableRequest{\n\t\tName: prefix + \"/tables/\" + table,\n\t}\n\tres, err := ac.tClient.GetTable(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tti := &TableInfo{}\n\tfor fam := range res.ColumnFamilies {\n\t\tti.Families = append(ti.Families, fam)\n\t}\n\treturn ti, nil\n}\n\n// SetClusterSize sets the number of server nodes for this cluster.\nfunc (ac *AdminClient) SetClusterSize(ctx context.Context, nodes int) error {\n\treq := &btcspb.GetClusterRequest{\n\t\tName: ac.clusterPrefix(),\n\t}\n\tclu, err := ac.cClient.GetCluster(ctx, req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tclu.ServeNodes = int32(nodes)\n\t_, err = ac.cClient.UpdateCluster(ctx, clu)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/bigtable.go",
    "content": "/*\nCopyright 2015 Google Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage bigtable\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/cloud\"\n\tbtdpb \"google.golang.org/cloud/bigtable/internal/data_proto\"\n\tbtspb \"google.golang.org/cloud/bigtable/internal/service_proto\"\n\t\"google.golang.org/grpc\"\n)\n\nconst prodAddr = \"bigtable.googleapis.com:443\"\n\n// Client is a client for reading and writing data to tables in a cluster.\ntype Client struct {\n\tconn   *grpc.ClientConn\n\tclient btspb.BigtableServiceClient\n\n\tproject, zone, cluster string\n}\n\n// NewClient creates a new Client for a given project, zone and cluster.\nfunc NewClient(ctx context.Context, project, zone, cluster string, opts ...cloud.ClientOption) (*Client, error) {\n\to := []cloud.ClientOption{\n\t\tcloud.WithEndpoint(prodAddr),\n\t\tcloud.WithScopes(Scope),\n\t}\n\to = append(o, opts...)\n\tconn, err := cloud.DialGRPC(ctx, o...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"dialing: %v\", err)\n\t}\n\treturn &Client{\n\t\tconn:   conn,\n\t\tclient: btspb.NewBigtableServiceClient(conn),\n\n\t\tproject: project,\n\t\tzone:    zone,\n\t\tcluster: cluster,\n\t}, nil\n}\n\n// Close closes the Client.\nfunc (c *Client) Close() {\n\tc.conn.Close()\n}\n\nfunc (c *Client) fullTableName(table string) string {\n\treturn 
fmt.Sprintf(\"projects/%s/zones/%s/clusters/%s/tables/%s\", c.project, c.zone, c.cluster, table)\n}\n\n// A Table refers to a table.\ntype Table struct {\n\tc     *Client\n\ttable string\n}\n\n// Open opens a table.\nfunc (c *Client) Open(table string) *Table {\n\treturn &Table{\n\t\tc:     c,\n\t\ttable: table,\n\t}\n}\n\n// TODO(dsymonds): Read method that returns a sequence of ReadItems.\n\n// ReadRows reads rows from a table. f is called for each row.\n// If f returns false, the stream is shut down and ReadRows returns.\n// f owns its argument, and f is called serially.\n//\n// By default, the yielded rows will contain all values in all cells.\n// Use RowFilter to limit the cells returned.\nfunc (t *Table) ReadRows(ctx context.Context, arg RowRange, f func(Row) bool, opts ...ReadOption) error {\n\treq := &btspb.ReadRowsRequest{\n\t\tTableName: t.c.fullTableName(t.table),\n\t\tRowRange:  arg.proto(),\n\t}\n\tfor _, opt := range opts {\n\t\topt.set(req)\n\t}\n\tctx, cancel := context.WithCancel(ctx) // for aborting the stream\n\tstream, err := t.c.client.ReadRows(ctx, req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcr := new(chunkReader)\n\tfor {\n\t\tres, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif row := cr.process(res); row != nil {\n\t\t\tif !f(row) {\n\t\t\t\t// Cancel and drain stream.\n\t\t\t\tcancel()\n\t\t\t\tfor {\n\t\t\t\t\tif _, err := stream.Recv(); err != nil {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// ReadRow is a convenience implementation of a single-row reader.\n// A missing row will return a zero-length map and a nil error.\nfunc (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) {\n\tvar r Row\n\terr := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool {\n\t\tr = rr\n\t\treturn true\n\t}, opts...)\n\treturn r, err\n}\n\ntype chunkReader struct {\n\tpartial map[string]Row // incomplete 
rows\n}\n\n// process handles a single btspb.ReadRowsResponse.\n// If it completes a row, that row is returned.\nfunc (cr *chunkReader) process(rrr *btspb.ReadRowsResponse) Row {\n\tif cr.partial == nil {\n\t\tcr.partial = make(map[string]Row)\n\t}\n\trow := string(rrr.RowKey)\n\tr := cr.partial[row]\n\tif r == nil {\n\t\tr = make(Row)\n\t\tcr.partial[row] = r\n\t}\n\tfor _, chunk := range rrr.Chunks {\n\t\tif chunk.ResetRow {\n\t\t\tr = make(Row)\n\t\t\tcr.partial[row] = r\n\t\t\tcontinue\n\t\t}\n\t\tif chunk.CommitRow {\n\t\t\tdelete(cr.partial, row)\n\t\t\treturn r // assume that this is the last chunk\n\t\t}\n\t\tdecodeFamilyProto(r, row, chunk.RowContents)\n\t}\n\treturn nil\n}\n\n// decodeFamilyProto adds the cell data from f to the given row.\nfunc decodeFamilyProto(r Row, row string, f *btdpb.Family) {\n\tfam := f.Name // does not have colon\n\tfor _, col := range f.Columns {\n\t\tfor _, cell := range col.Cells {\n\t\t\tri := ReadItem{\n\t\t\t\tRow:       row,\n\t\t\t\tColumn:    fmt.Sprintf(\"%s:%s\", fam, col.Qualifier),\n\t\t\t\tTimestamp: Timestamp(cell.TimestampMicros),\n\t\t\t\tValue:     cell.Value,\n\t\t\t}\n\t\t\tr[fam] = append(r[fam], ri)\n\t\t}\n\t}\n}\n\n// A RowRange is used to describe the rows to be read.\n// A RowRange is a half-open interval [Start, Limit) encompassing\n// all the rows with keys at least as large as Start, and less than Limit.\n// (Bigtable string comparison is the same as Go's.)\n// A RowRange can be unbounded, encompassing all keys at least as large as Start.\ntype RowRange struct {\n\tstart string\n\tlimit string\n}\n\n// NewRange returns the new RowRange [begin, end).\nfunc NewRange(begin, end string) RowRange {\n\treturn RowRange{\n\t\tstart: begin,\n\t\tlimit: end,\n\t}\n}\n\n// Unbounded tests whether a RowRange is unbounded.\nfunc (r RowRange) Unbounded() bool {\n\treturn r.limit == \"\"\n}\n\n// Contains says whether the RowRange contains the key.\nfunc (r RowRange) Contains(row string) bool {\n\treturn r.start <= 
row && (r.limit == \"\" || r.limit > row)\n}\n\n// String provides a printable description of a RowRange.\nfunc (r RowRange) String() string {\n\ta := strconv.Quote(r.start)\n\tif r.Unbounded() {\n\t\treturn fmt.Sprintf(\"[%s,∞)\", a)\n\t}\n\treturn fmt.Sprintf(\"[%s,%q)\", a, r.limit)\n}\n\nfunc (r RowRange) proto() *btdpb.RowRange {\n\tif r.Unbounded() {\n\t\treturn &btdpb.RowRange{StartKey: []byte(r.start)}\n\t}\n\treturn &btdpb.RowRange{\n\t\tStartKey: []byte(r.start),\n\t\tEndKey:   []byte(r.limit),\n\t}\n}\n\n// SingleRow returns a RowRange for reading a single row.\nfunc SingleRow(row string) RowRange {\n\treturn RowRange{\n\t\tstart: row,\n\t\tlimit: row + \"\\x00\",\n\t}\n}\n\n// PrefixRange returns a RowRange consisting of all keys starting with the prefix.\nfunc PrefixRange(prefix string) RowRange {\n\treturn RowRange{\n\t\tstart: prefix,\n\t\tlimit: prefixSuccessor(prefix),\n\t}\n}\n\n// InfiniteRange returns the RowRange consisting of all keys at least as\n// large as start.\nfunc InfiniteRange(start string) RowRange {\n\treturn RowRange{\n\t\tstart: start,\n\t\tlimit: \"\",\n\t}\n}\n\n// prefixSuccessor returns the lexically smallest string greater than the\n// prefix, if it exists, or \"\" otherwise.  
In either case, it is the string\n// needed for the Limit of a RowRange.\nfunc prefixSuccessor(prefix string) string {\n\tif prefix == \"\" {\n\t\treturn \"\" // infinite range\n\t}\n\tn := len(prefix)\n\tfor n--; n >= 0 && prefix[n] == '\\xff'; n-- {\n\t}\n\tif n == -1 {\n\t\treturn \"\"\n\t}\n\tans := []byte(prefix[:n])\n\tans = append(ans, prefix[n]+1)\n\treturn string(ans)\n}\n\n// A ReadOption is an optional argument to ReadRows.\ntype ReadOption interface {\n\tset(req *btspb.ReadRowsRequest)\n}\n\n// RowFilter returns a ReadOption that applies f to the contents of read rows.\nfunc RowFilter(f Filter) ReadOption { return rowFilter{f} }\n\ntype rowFilter struct{ f Filter }\n\nfunc (rf rowFilter) set(req *btspb.ReadRowsRequest) { req.Filter = rf.f.proto() }\n\n// LimitRows returns a ReadOption that will limit the number of rows to be read.\nfunc LimitRows(limit int64) ReadOption { return limitRows{limit} }\n\ntype limitRows struct{ limit int64 }\n\nfunc (lr limitRows) set(req *btspb.ReadRowsRequest) { req.NumRowsLimit = lr.limit }\n\n// A Row is returned by ReadRow. The map is keyed by column family (the prefix\n// of the column name before the colon). The values are the returned ReadItems\n// for that column family in the order returned by Read.\ntype Row map[string][]ReadItem\n\n// Key returns the row's key, or \"\" if the row is empty.\nfunc (r Row) Key() string {\n\tfor _, items := range r {\n\t\tif len(items) > 0 {\n\t\t\treturn items[0].Row\n\t\t}\n\t}\n\treturn \"\"\n}\n\n// A ReadItem is returned by Read. 
A ReadItem contains data from a specific row and column.\ntype ReadItem struct {\n\tRow, Column string\n\tTimestamp   Timestamp\n\tValue       []byte\n}\n\n// Apply applies a Mutation to a specific row.\nfunc (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {\n\tafter := func(res proto.Message) {\n\t\tfor _, o := range opts {\n\t\t\to.after(res)\n\t\t}\n\t}\n\n\tif m.cond == nil {\n\t\treq := &btspb.MutateRowRequest{\n\t\t\tTableName: t.c.fullTableName(t.table),\n\t\t\tRowKey:    []byte(row),\n\t\t\tMutations: m.ops,\n\t\t}\n\t\tres, err := t.c.client.MutateRow(ctx, req)\n\t\tif err == nil {\n\t\t\tafter(res)\n\t\t}\n\t\treturn err\n\t}\n\treq := &btspb.CheckAndMutateRowRequest{\n\t\tTableName:       t.c.fullTableName(t.table),\n\t\tRowKey:          []byte(row),\n\t\tPredicateFilter: m.cond.proto(),\n\t}\n\tif m.mtrue != nil {\n\t\treq.TrueMutations = m.mtrue.ops\n\t}\n\tif m.mfalse != nil {\n\t\treq.FalseMutations = m.mfalse.ops\n\t}\n\tres, err := t.c.client.CheckAndMutateRow(ctx, req)\n\tif err == nil {\n\t\tafter(res)\n\t}\n\treturn err\n}\n\n// An ApplyOption is an optional argument to Apply.\ntype ApplyOption interface {\n\tafter(res proto.Message)\n}\n\ntype applyAfterFunc func(res proto.Message)\n\nfunc (a applyAfterFunc) after(res proto.Message) { a(res) }\n\n// GetCondMutationResult returns an ApplyOption that reports whether the conditional\n// mutation's condition matched.\nfunc GetCondMutationResult(matched *bool) ApplyOption {\n\treturn applyAfterFunc(func(res proto.Message) {\n\t\tif res, ok := res.(*btspb.CheckAndMutateRowResponse); ok {\n\t\t\t*matched = res.PredicateMatched\n\t\t}\n\t})\n}\n\n// Mutation represents a set of changes for a single row of a table.\ntype Mutation struct {\n\tops []*btdpb.Mutation\n\n\t// for conditional mutations\n\tcond          Filter\n\tmtrue, mfalse *Mutation\n}\n\n// NewMutation returns a new mutation.\nfunc NewMutation() *Mutation {\n\treturn new(Mutation)\n}\n\n// 
NewCondMutation returns a conditional mutation.\n// The given row filter determines which mutation is applied:\n// If the filter matches any cell in the row, mtrue is applied;\n// otherwise, mfalse is applied.\n// Either given mutation may be nil.\nfunc NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation {\n\treturn &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse}\n}\n\n// Set sets a value in a specified column, with the given timestamp.\n// The timestamp will be truncated to millisecond resolution.\n// A timestamp of ServerTime means to use the server timestamp.\nfunc (m *Mutation) Set(family, column string, ts Timestamp, value []byte) {\n\tif ts != ServerTime {\n\t\t// Truncate to millisecond resolution, since that's the default table config.\n\t\t// TODO(dsymonds): Provide a way to override this behaviour.\n\t\tts -= ts % 1000\n\t}\n\tm.ops = append(m.ops, &btdpb.Mutation{SetCell: &btdpb.Mutation_SetCell{\n\t\tFamilyName:      family,\n\t\tColumnQualifier: []byte(column),\n\t\tTimestampMicros: int64(ts),\n\t\tValue:           value,\n\t}})\n}\n\n// DeleteCellsInColumn will delete all the cells whose columns are family:column.\nfunc (m *Mutation) DeleteCellsInColumn(family, column string) {\n\tm.ops = append(m.ops, &btdpb.Mutation{DeleteFromColumn: &btdpb.Mutation_DeleteFromColumn{\n\t\tFamilyName:      family,\n\t\tColumnQualifier: []byte(column),\n\t}})\n}\n\n// DeleteTimestampRange deletes all cells whose columns are family:column\n// and whose timestamps are in the half-open interval [start, end).\n// If end is zero, it will be interpreted as infinity.\nfunc (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) {\n\tm.ops = append(m.ops, &btdpb.Mutation{DeleteFromColumn: &btdpb.Mutation_DeleteFromColumn{\n\t\tFamilyName:      family,\n\t\tColumnQualifier: []byte(column),\n\t\tTimeRange: &btdpb.TimestampRange{\n\t\t\tStartTimestampMicros: int64(start),\n\t\t\tEndTimestampMicros:   int64(end),\n\t\t},\n\t}})\n}\n\n// 
DeleteCellsInFamily will delete all the cells whose columns are family:*.\nfunc (m *Mutation) DeleteCellsInFamily(family string) {\n\tm.ops = append(m.ops, &btdpb.Mutation{DeleteFromFamily: &btdpb.Mutation_DeleteFromFamily{\n\t\tFamilyName: family,\n\t}})\n}\n\n// DeleteRow deletes the entire row.\nfunc (m *Mutation) DeleteRow() {\n\tm.ops = append(m.ops, &btdpb.Mutation{DeleteFromRow: &btdpb.Mutation_DeleteFromRow{}})\n}\n\n// Timestamp is in units of microseconds since 1 January 1970.\ntype Timestamp int64\n\n// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set.\n// It indicates that the server's timestamp should be used.\nconst ServerTime Timestamp = -1\n\n// Time converts a time.Time into a Timestamp.\nfunc Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) }\n\n// Now returns the Timestamp representation of the current time on the client.\nfunc Now() Timestamp { return Time(time.Now()) }\n\n// Time converts a Timestamp into a time.Time.\nfunc (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) }\n\n// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row.\n// It returns the newly written cells.\nfunc (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) {\n\treq := &btspb.ReadModifyWriteRowRequest{\n\t\tTableName: t.c.fullTableName(t.table),\n\t\tRowKey:    []byte(row),\n\t\tRules:     m.ops,\n\t}\n\tres, err := t.c.client.ReadModifyWriteRow(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := make(Row)\n\tfor _, fam := range res.Families { // res is *btdpb.Row, fam is *btdpb.Family\n\t\tdecodeFamilyProto(r, row, fam)\n\t}\n\treturn r, nil\n}\n\n// ReadModifyWrite represents a set of operations on a single row of a table.\n// It is like Mutation but for non-idempotent changes.\n// When applied, these operations operate on the latest values of the row's cells,\n// and result in a new value being written to the relevant cell with a 
timestamp\n// that is max(existing timestamp, current server time).\n//\n// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will\n// be executed serially by the server.\ntype ReadModifyWrite struct {\n\tops []*btdpb.ReadModifyWriteRule\n}\n\n// NewReadModifyWrite returns a new ReadModifyWrite.\nfunc NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) }\n\n// AppendValue appends a value to a specific cell's value.\n// If the cell is unset, it will be treated as an empty value.\nfunc (m *ReadModifyWrite) AppendValue(family, column string, v []byte) {\n\tm.ops = append(m.ops, &btdpb.ReadModifyWriteRule{\n\t\tFamilyName:      family,\n\t\tColumnQualifier: []byte(column),\n\t\tAppendValue:     v,\n\t})\n}\n\n// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer,\n// and adds a value to it. If the cell is unset, it will be treated as zero.\n// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite\n// operation will fail.\nfunc (m *ReadModifyWrite) Increment(family, column string, delta int64) {\n\tm.ops = append(m.ops, &btdpb.ReadModifyWriteRule{\n\t\tFamilyName:      family,\n\t\tColumnQualifier: []byte(column),\n\t\tIncrementAmount: delta,\n\t})\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/bttest/inmem.go",
    "content": "/*\nCopyright 2015 Google Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n/*\nPackage bttest contains test helpers for working with the bigtable package.\n\nTo use a Server, create it, and then connect to it with no security:\n(The project/zone/cluster values are ignored.)\n\tsrv, err := bttest.NewServer()\n\t...\n\tclient, err := bigtable.NewClient(ctx, proj, zone, cluster,\n\t\tbigtable.WithCredentials(nil), bigtable.WithInsecureAddr(srv.Addr))\n\t...\n*/\npackage bttest\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"golang.org/x/net/context\"\n\tbtdpb \"google.golang.org/cloud/bigtable/internal/data_proto\"\n\temptypb \"google.golang.org/cloud/bigtable/internal/empty\"\n\tbtspb \"google.golang.org/cloud/bigtable/internal/service_proto\"\n\tbttdpb \"google.golang.org/cloud/bigtable/internal/table_data_proto\"\n\tbttspb \"google.golang.org/cloud/bigtable/internal/table_service_proto\"\n\t\"google.golang.org/grpc\"\n)\n\n// Server is an in-memory Cloud Bigtable fake.\n// It is unauthenticated, and only a rough approximation.\ntype Server struct {\n\tAddr string\n\n\tl   net.Listener\n\tsrv *grpc.Server\n\ts   *server\n}\n\n// server is the real implementation of the fake.\n// It is a separate and unexported type so the API won't be cluttered with\n// methods that are only relevant to the fake's implementation.\ntype server struct {\n\tmu     
sync.Mutex\n\ttables map[string]*table // keyed by fully qualified name\n\n\t// Any unimplemented methods will cause a panic.\n\tbttspb.BigtableTableServiceServer\n\tbtspb.BigtableServiceServer\n}\n\n// NewServer creates a new Server. The Server will be listening for gRPC connections\n// at the address named by the Addr field, without TLS.\nfunc NewServer() (*Server, error) {\n\tl, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := &Server{\n\t\tAddr: l.Addr().String(),\n\t\tl:    l,\n\t\tsrv:  grpc.NewServer(),\n\t\ts: &server{\n\t\t\ttables: make(map[string]*table),\n\t\t},\n\t}\n\tbttspb.RegisterBigtableTableServiceServer(s.srv, s.s)\n\tbtspb.RegisterBigtableServiceServer(s.srv, s.s)\n\n\tgo s.srv.Serve(s.l)\n\n\treturn s, nil\n}\n\n// Close shuts down the server.\nfunc (s *Server) Close() {\n\ts.srv.Stop()\n\ts.l.Close()\n}\n\nfunc (s *server) CreateTable(ctx context.Context, req *bttspb.CreateTableRequest) (*bttdpb.Table, error) {\n\ttbl := req.Name + \"/tables/\" + req.TableId\n\n\ts.mu.Lock()\n\tif _, ok := s.tables[tbl]; ok {\n\t\ts.mu.Unlock()\n\t\treturn nil, fmt.Errorf(\"table %q already exists\", tbl)\n\t}\n\ts.tables[tbl] = newTable()\n\ts.mu.Unlock()\n\n\treturn &bttdpb.Table{Name: tbl}, nil\n}\n\nfunc (s *server) ListTables(ctx context.Context, req *bttspb.ListTablesRequest) (*bttspb.ListTablesResponse, error) {\n\tres := &bttspb.ListTablesResponse{}\n\tprefix := req.Name + \"/tables/\"\n\n\ts.mu.Lock()\n\tfor tbl := range s.tables {\n\t\tif strings.HasPrefix(tbl, prefix) {\n\t\t\tres.Tables = append(res.Tables, &bttdpb.Table{Name: tbl})\n\t\t}\n\t}\n\ts.mu.Unlock()\n\n\treturn res, nil\n}\n\nfunc (s *server) DeleteTable(ctx context.Context, req *bttspb.DeleteTableRequest) (*emptypb.Empty, error) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif _, ok := s.tables[req.Name]; !ok {\n\t\treturn nil, fmt.Errorf(\"no such table %q\", req.Name)\n\t}\n\tdelete(s.tables, req.Name)\n\treturn &emptypb.Empty{}, 
nil\n}\n\nfunc (s *server) CreateColumnFamily(ctx context.Context, req *bttspb.CreateColumnFamilyRequest) (*bttdpb.ColumnFamily, error) {\n\ts.mu.Lock()\n\ttbl, ok := s.tables[req.Name]\n\ts.mu.Unlock()\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"no such table %q\", req.Name)\n\t}\n\n\t// Check it is unique and record it.\n\tfam := req.ColumnFamilyId\n\ttbl.mu.Lock()\n\tdefer tbl.mu.Unlock()\n\tif _, ok := tbl.families[fam]; ok {\n\t\treturn nil, fmt.Errorf(\"family %q already exists\", fam)\n\t}\n\ttbl.families[fam] = true\n\treturn &bttdpb.ColumnFamily{\n\t\tName: req.Name + \"/families/\" + fam,\n\t}, nil\n}\n\nfunc (s *server) ReadRows(req *btspb.ReadRowsRequest, stream btspb.BigtableService_ReadRowsServer) error {\n\ts.mu.Lock()\n\ttbl, ok := s.tables[req.TableName]\n\ts.mu.Unlock()\n\tif !ok {\n\t\treturn fmt.Errorf(\"no such table %q\", req.TableName)\n\t}\n\n\tvar start, end string // half-open interval\n\tif rr := req.RowRange; rr != nil {\n\t\tstart, end = string(rr.StartKey), string(rr.EndKey)\n\t} else {\n\t\t// A single row read is simply an edge case.\n\t\tstart = string(req.RowKey)\n\t\tend = start + \"\\x00\"\n\t}\n\n\t// Get rows to stream back.\n\ttbl.mu.RLock()\n\tsi, ei := 0, len(tbl.rows) // half-open interval\n\tif start != \"\" {\n\t\tsi = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= start })\n\t}\n\tif end != \"\" {\n\t\tei = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= end })\n\t}\n\tif si >= ei {\n\t\ttbl.mu.RUnlock()\n\t\treturn nil\n\t}\n\trows := make([]*row, ei-si)\n\tcopy(rows, tbl.rows[si:ei])\n\ttbl.mu.RUnlock()\n\n\tfor _, r := range rows {\n\t\tif err := streamRow(stream, r, req.Filter); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc streamRow(stream btspb.BigtableService_ReadRowsServer, r *row, f *btdpb.RowFilter) error {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\trrr := &btspb.ReadRowsResponse{\n\t\tRowKey: []byte(r.key),\n\t}\n\tfor col, cs := range 
r.cells {\n\t\ti := strings.Index(col, \":\") // guaranteed to exist\n\t\tfam, col := col[:i], col[i+1:]\n\t\tcells := filterCells(f, r, fam, col, cs)\n\t\tif len(cells) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t// TODO(dsymonds): Apply transformers.\n\t\tchunk := &btspb.ReadRowsResponse_Chunk{\n\t\t\tRowContents: &btdpb.Family{\n\t\t\t\tName: fam,\n\t\t\t\tColumns: []*btdpb.Column{{\n\t\t\t\t\tQualifier: []byte(col),\n\t\t\t\t\t// Cells is populated below.\n\t\t\t\t}},\n\t\t\t},\n\t\t}\n\t\tcolm := chunk.RowContents.Columns[0]\n\t\tfor _, cell := range cells {\n\t\t\tcolm.Cells = append(colm.Cells, &btdpb.Cell{\n\t\t\t\tTimestampMicros: cell.ts,\n\t\t\t\tValue:           cell.value,\n\t\t\t})\n\t\t}\n\t\trrr.Chunks = append(rrr.Chunks, chunk)\n\t}\n\trrr.Chunks = append(rrr.Chunks, &btspb.ReadRowsResponse_Chunk{CommitRow: true})\n\treturn stream.Send(rrr)\n}\n\nfunc filterCells(f *btdpb.RowFilter, r *row, fam, col string, cs []cell) []cell {\n\t// Special handling for cells_per_column_limit_filter.\n\tif f != nil && f.CellsPerColumnLimitFilter > 0 {\n\t\tn := int(f.CellsPerColumnLimitFilter)\n\t\tif n > len(cs) {\n\t\t\tn = len(cs)\n\t\t}\n\t\treturn cs[:n]\n\t}\n\n\tvar ret []cell\n\tfor _, cell := range cs {\n\t\tif includeCell(f, r, fam, col, cell) {\n\t\t\tret = append(ret, cell)\n\t\t}\n\t}\n\treturn ret\n}\n\nfunc includeCell(f *btdpb.RowFilter, r *row, fam, col string, cell cell) bool {\n\tif f == nil {\n\t\treturn true\n\t}\n\t// TODO(dsymonds): Implement many more filters.\n\tswitch {\n\tdefault:\n\t\tlog.Printf(\"WARNING: don't know how to handle filter (ignoring it): %v\", f)\n\t\treturn true\n\tcase f.Chain != nil:\n\t\tfor _, sub := range f.Chain.Filters {\n\t\t\tif !includeCell(sub, r, fam, col, cell) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase len(f.ColumnQualifierRegexFilter) > 0:\n\t\tpat := string(f.ColumnQualifierRegexFilter)\n\t\trx, err := regexp.Compile(pat)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Bad 
column_qualifier_regex_filter pattern %q: %v\", pat, err)\n\t\t\treturn false\n\t\t}\n\t\treturn rx.MatchString(col)\n\tcase len(f.ValueRegexFilter) > 0:\n\t\tpat := string(f.ValueRegexFilter)\n\t\trx, err := regexp.Compile(pat)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Bad value_regex_filter pattern %q: %v\", pat, err)\n\t\t\treturn false\n\t\t}\n\t\treturn rx.Match(cell.value)\n\t}\n}\n\nfunc (s *server) MutateRow(ctx context.Context, req *btspb.MutateRowRequest) (*emptypb.Empty, error) {\n\ts.mu.Lock()\n\ttbl, ok := s.tables[req.TableName]\n\ts.mu.Unlock()\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"no such table %q\", req.TableName)\n\t}\n\n\tr := tbl.mutableRow(string(req.RowKey))\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif err := applyMutations(tbl, r, req.Mutations); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &emptypb.Empty{}, nil\n}\n\nfunc (s *server) CheckAndMutateRow(ctx context.Context, req *btspb.CheckAndMutateRowRequest) (*btspb.CheckAndMutateRowResponse, error) {\n\ts.mu.Lock()\n\ttbl, ok := s.tables[req.TableName]\n\ts.mu.Unlock()\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"no such table %q\", req.TableName)\n\t}\n\n\tres := &btspb.CheckAndMutateRowResponse{}\n\n\tr := tbl.mutableRow(string(req.RowKey))\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\t// Figure out which mutation to apply.\n\twhichMut := false\n\tif req.PredicateFilter == nil {\n\t\t// Use true_mutations iff row contains any cells.\n\t\twhichMut = len(r.cells) > 0\n\t} else {\n\t\t// Use true_mutations iff any cells in the row match the filter.\n\t\tfor col, cs := range r.cells {\n\t\t\ti := strings.Index(col, \":\") // guaranteed to exist\n\t\t\tfam, col := col[:i], col[i+1:]\n\t\t\tfor _, cell := range cs {\n\t\t\t\tif includeCell(req.PredicateFilter, r, fam, col, cell) {\n\t\t\t\t\twhichMut = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif whichMut {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t// TODO(dsymonds): Figure out if this is supposed to be set\n\t\t// even when there's no 
predicate filter.\n\t\tres.PredicateMatched = whichMut\n\t}\n\tmuts := req.FalseMutations\n\tif whichMut {\n\t\tmuts = req.TrueMutations\n\t}\n\n\tif err := applyMutations(tbl, r, muts); err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}\n\n// applyMutations applies a sequence of mutations to a row.\n// It assumes r.mu is locked.\nfunc applyMutations(tbl *table, r *row, muts []*btdpb.Mutation) error {\n\tfor _, mut := range muts {\n\t\tswitch {\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"can't handle mutation %v\", mut)\n\t\tcase mut.SetCell != nil:\n\t\t\tset := mut.SetCell\n\t\t\ttbl.mu.RLock()\n\t\t\tfamOK := tbl.families[set.FamilyName]\n\t\t\ttbl.mu.RUnlock()\n\t\t\tif !famOK {\n\t\t\t\treturn fmt.Errorf(\"unknown family %q\", set.FamilyName)\n\t\t\t}\n\t\t\tif !tbl.validTimestamp(set.TimestampMicros) {\n\t\t\t\treturn fmt.Errorf(\"invalid timestamp %d\", set.TimestampMicros)\n\t\t\t}\n\t\t\tcol := fmt.Sprintf(\"%s:%s\", set.FamilyName, set.ColumnQualifier)\n\n\t\t\tcs := r.cells[col]\n\t\t\tnewCell := cell{ts: set.TimestampMicros, value: set.Value}\n\t\t\treplaced := false\n\t\t\tfor i, cell := range cs {\n\t\t\t\tif cell.ts == newCell.ts {\n\t\t\t\t\tcs[i] = newCell\n\t\t\t\t\treplaced = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !replaced {\n\t\t\t\tcs = append(cs, newCell)\n\t\t\t}\n\t\t\tsort.Sort(byDescTS(cs))\n\t\t\tr.cells[col] = cs\n\t\tcase mut.DeleteFromColumn != nil:\n\t\t\tdel := mut.DeleteFromColumn\n\t\t\tcol := fmt.Sprintf(\"%s:%s\", del.FamilyName, del.ColumnQualifier)\n\n\t\t\tcs := r.cells[col]\n\t\t\tif del.TimeRange != nil {\n\t\t\t\ttsr := del.TimeRange\n\t\t\t\tif !tbl.validTimestamp(tsr.StartTimestampMicros) {\n\t\t\t\t\treturn fmt.Errorf(\"invalid timestamp %d\", tsr.StartTimestampMicros)\n\t\t\t\t}\n\t\t\t\tif !tbl.validTimestamp(tsr.EndTimestampMicros) {\n\t\t\t\t\treturn fmt.Errorf(\"invalid timestamp %d\", tsr.EndTimestampMicros)\n\t\t\t\t}\n\t\t\t\t// Find half-open interval to remove.\n\t\t\t\t// Cells are in 
descending timestamp order,\n\t\t\t\t// so the predicates to sort.Search are inverted.\n\t\t\t\tsi, ei := 0, len(cs)\n\t\t\t\tif tsr.StartTimestampMicros > 0 {\n\t\t\t\t\tei = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.StartTimestampMicros })\n\t\t\t\t}\n\t\t\t\tif tsr.EndTimestampMicros > 0 {\n\t\t\t\t\tsi = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.EndTimestampMicros })\n\t\t\t\t}\n\t\t\t\tif si < ei {\n\t\t\t\t\tcopy(cs[si:], cs[ei:])\n\t\t\t\t\tcs = cs[:len(cs)-(ei-si)]\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tcs = nil\n\t\t\t}\n\t\t\tif len(cs) == 0 {\n\t\t\t\tdelete(r.cells, col)\n\t\t\t} else {\n\t\t\t\tr.cells[col] = cs\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *server) ReadModifyWriteRow(ctx context.Context, req *btspb.ReadModifyWriteRowRequest) (*btdpb.Row, error) {\n\ts.mu.Lock()\n\ttbl, ok := s.tables[req.TableName]\n\ts.mu.Unlock()\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"no such table %q\", req.TableName)\n\t}\n\n\tupdates := make(map[string]cell) // copy of updated cells; keyed by full column name\n\n\tr := tbl.mutableRow(string(req.RowKey))\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\t// Assume all mutations apply to the most recent version of the cell.\n\t// TODO(dsymonds): Verify this assumption and document it in the proto.\n\tfor _, rule := range req.Rules {\n\t\tkey := fmt.Sprintf(\"%s:%s\", rule.FamilyName, rule.ColumnQualifier)\n\n\t\tnewCell := false\n\t\tif len(r.cells[key]) == 0 {\n\t\t\tr.cells[key] = []cell{{\n\t\t\t// TODO(dsymonds): should this set a timestamp?\n\t\t\t}}\n\t\t\tnewCell = true\n\t\t}\n\t\tcell := &r.cells[key][0]\n\n\t\tif len(rule.AppendValue) > 0 {\n\t\t\tcell.value = append(cell.value, rule.AppendValue...)\n\t\t}\n\t\tif rule.IncrementAmount != 0 {\n\t\t\tvar v int64\n\t\t\tif !newCell {\n\t\t\t\tif len(cell.value) != 8 {\n\t\t\t\t\treturn nil, fmt.Errorf(\"increment on non-64-bit value\")\n\t\t\t\t}\n\t\t\t\tv = int64(binary.BigEndian.Uint64(cell.value))\n\t\t\t}\n\t\t\tv += 
rule.IncrementAmount\n\t\t\tvar val [8]byte\n\t\t\tbinary.BigEndian.PutUint64(val[:], uint64(v))\n\t\t\tcell.value = val[:]\n\t\t}\n\t\tupdates[key] = *cell\n\t}\n\n\tres := &btdpb.Row{\n\t\tKey: req.RowKey,\n\t}\n\tfor col, cell := range updates {\n\t\ti := strings.Index(col, \":\")\n\t\tfam, qual := col[:i], col[i+1:]\n\t\tvar f *btdpb.Family\n\t\tfor _, ff := range res.Families {\n\t\t\tif ff.Name == fam {\n\t\t\t\tf = ff\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif f == nil {\n\t\t\tf = &btdpb.Family{Name: fam}\n\t\t\tres.Families = append(res.Families, f)\n\t\t}\n\t\tf.Columns = append(f.Columns, &btdpb.Column{\n\t\t\tQualifier: []byte(qual),\n\t\t\tCells: []*btdpb.Cell{{\n\t\t\t\tValue: cell.value,\n\t\t\t}},\n\t\t})\n\t}\n\treturn res, nil\n}\n\ntype table struct {\n\tmu       sync.RWMutex\n\tfamilies map[string]bool // keyed by plain family name\n\trows     []*row          // sorted by row key\n\trowIndex map[string]*row // indexed by row key\n}\n\nfunc newTable() *table {\n\treturn &table{\n\t\tfamilies: make(map[string]bool),\n\t\trowIndex: make(map[string]*row),\n\t}\n}\n\nfunc (t *table) validTimestamp(ts int64) bool {\n\t// Assume millisecond granularity is required.\n\treturn ts%1000 == 0\n}\n\nfunc (t *table) mutableRow(row string) *row {\n\t// Try fast path first.\n\tt.mu.RLock()\n\tr := t.rowIndex[row]\n\tt.mu.RUnlock()\n\tif r != nil {\n\t\treturn r\n\t}\n\n\t// We probably need to create the row.\n\tt.mu.Lock()\n\tr = t.rowIndex[row]\n\tif r == nil {\n\t\tr = newRow(row)\n\t\tt.rowIndex[row] = r\n\t\tt.rows = append(t.rows, r)\n\t\tsort.Sort(byRowKey(t.rows)) // yay, inefficient!\n\t}\n\tt.mu.Unlock()\n\treturn r\n}\n\ntype byRowKey []*row\n\nfunc (b byRowKey) Len() int           { return len(b) }\nfunc (b byRowKey) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }\nfunc (b byRowKey) Less(i, j int) bool { return b[i].key < b[j].key }\n\ntype row struct {\n\tkey string\n\n\tmu    sync.Mutex\n\tcells map[string][]cell // keyed by full column name; cells 
are in descending timestamp order\n}\n\nfunc newRow(key string) *row {\n\treturn &row{\n\t\tkey:   key,\n\t\tcells: make(map[string][]cell),\n\t}\n}\n\ntype cell struct {\n\tts    int64\n\tvalue []byte\n}\n\ntype byDescTS []cell\n\nfunc (b byDescTS) Len() int           { return len(b) }\nfunc (b byDescTS) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }\nfunc (b byDescTS) Less(i, j int) bool { return b[i].ts > b[j].ts }\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/cmd/cbt/cbt.go",
    "content": "/*\nCopyright 2015 Google Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage main\n\n// Command docs are in cbtdoc.go.\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"go/format\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/cloud/bigtable\"\n)\n\nvar (\n\t// These get default values from $HOME/.cbtrc if it exists.\n\tproject = flag.String(\"project\", \"\", \"project ID\")\n\tzone    = flag.String(\"zone\", \"\", \"CBT zone\")\n\tcluster = flag.String(\"cluster\", \"\", \"CBT cluster\")\n\tcreds   = flag.String(\"creds\", \"\", \"if set, use application credentials in this file\")\n\n\toFlag = flag.String(\"o\", \"\", \"if set, redirect stdout to this file\")\n\n\tclient      *bigtable.Client\n\tadminClient *bigtable.AdminClient\n)\n\nfunc getClient() *bigtable.Client {\n\tif client == nil {\n\t\tvar err error\n\t\tclient, err = bigtable.NewClient(context.Background(), *project, *zone, *cluster)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Making bigtable.Client: %v\", err)\n\t\t}\n\t}\n\treturn client\n}\n\nfunc getAdminClient() *bigtable.AdminClient {\n\tif adminClient == nil {\n\t\tvar err error\n\t\tadminClient, err = bigtable.NewAdminClient(context.Background(), *project, *zone, *cluster)\n\t\tif err != nil 
{\n\t\t\tlog.Fatalf(\"Making bigtable.AdminClient: %v\", err)\n\t\t}\n\t}\n\treturn adminClient\n}\n\nfunc configFilename() string {\n\t// TODO(dsymonds): Might need tweaking for Windows.\n\treturn filepath.Join(os.Getenv(\"HOME\"), \".cbtrc\")\n}\n\nfunc loadConfig() {\n\tfilename := configFilename()\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\t// silent fail if the file isn't there\n\t\tif os.IsNotExist(err) {\n\t\t\treturn\n\t\t}\n\t\tlog.Fatalf(\"Reading %s: %v\", filename, err)\n\t}\n\ts := bufio.NewScanner(bytes.NewReader(data))\n\tfor s.Scan() {\n\t\tline := s.Text()\n\t\ti := strings.Index(line, \"=\")\n\t\tif i < 0 {\n\t\t\tlog.Fatalf(\"Bad line in %s: %q\", filename, line)\n\t\t}\n\t\tkey, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:])\n\t\tswitch key {\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unknown key in %s: %q\", filename, key)\n\t\tcase \"project\":\n\t\t\t*project = val\n\t\tcase \"zone\":\n\t\t\t*zone = val\n\t\tcase \"cluster\":\n\t\t\t*cluster = val\n\t\tcase \"creds\":\n\t\t\t*creds = val\n\t\t}\n\t}\n}\n\nfunc main() {\n\tloadConfig()\n\tflag.Usage = usage\n\tflag.Parse()\n\tif *project == \"\" {\n\t\tlog.Fatal(\"Missing -project\")\n\t}\n\tif *zone == \"\" {\n\t\tlog.Fatal(\"Missing -zone\")\n\t}\n\tif *cluster == \"\" {\n\t\tlog.Fatal(\"Missing -cluster\")\n\t}\n\tif *creds != \"\" {\n\t\tos.Setenv(\"GOOGLE_APPLICATION_CREDENTIALS\", *creds)\n\t}\n\tif flag.NArg() == 0 {\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n\n\tif *oFlag != \"\" {\n\t\tf, err := os.Create(*oFlag)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer func() {\n\t\t\tif err := f.Close(); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}()\n\t\tos.Stdout = f\n\t}\n\n\tctx := context.Background()\n\tfor _, cmd := range commands {\n\t\tif cmd.Name == flag.Arg(0) {\n\t\t\tcmd.do(ctx, flag.Args()[1:]...)\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Fatalf(\"Unknown command %q\", flag.Arg(0))\n}\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"Usage: %s 
[flags] <command> ...\\n\", os.Args[0])\n\tflag.PrintDefaults()\n\tfmt.Fprintf(os.Stderr, \"\\n%s\", cmdSummary)\n}\n\nvar cmdSummary string // generated in init, below\n\nfunc init() {\n\tvar buf bytes.Buffer\n\ttw := tabwriter.NewWriter(&buf, 10, 8, 4, '\\t', 0)\n\tfor _, cmd := range commands {\n\t\tfmt.Fprintf(tw, \"cbt %s\\t%s\\n\", cmd.Name, cmd.Desc)\n\t}\n\ttw.Flush()\n\tbuf.WriteString(configHelp)\n\tcmdSummary = buf.String()\n}\n\nvar configHelp = `\nFor convenience, values of the -project, -zone, -cluster and -creds flags\nmay be specified in ` + configFilename() + ` in this format:\n\tproject = my-project-123\n\tzone = us-central1-b\n\tcluster = my-cluster\n\tcreds = path-to-account-key.json\nAll values are optional, and all will be overridden by flags.\n`\n\nvar commands = []struct {\n\tName, Desc string\n\tdo         func(context.Context, ...string)\n\tUsage      string\n}{\n\t{\n\t\tName:  \"count\",\n\t\tDesc:  \"Count rows in a table\",\n\t\tdo:    doCount,\n\t\tUsage: \"cbt count <table>\",\n\t},\n\t{\n\t\tName:  \"createfamily\",\n\t\tDesc:  \"Create a column family\",\n\t\tdo:    doCreateFamily,\n\t\tUsage: \"cbt createfamily <table> <family>\",\n\t},\n\t{\n\t\tName:  \"createtable\",\n\t\tDesc:  \"Create a table\",\n\t\tdo:    doCreateTable,\n\t\tUsage: \"cbt createtable <table>\",\n\t},\n\t{\n\t\tName:  \"deletefamily\",\n\t\tDesc:  \"Delete a column family\",\n\t\tdo:    doDeleteFamily,\n\t\tUsage: \"cbt deletefamily <table> <family>\",\n\t},\n\t{\n\t\tName:  \"deleterow\",\n\t\tDesc:  \"Delete a row\",\n\t\tdo:    doDeleteRow,\n\t\tUsage: \"cbt deleterow <table> <row>\",\n\t},\n\t{\n\t\tName:  \"deletetable\",\n\t\tDesc:  \"Delete a table\",\n\t\tdo:    doDeleteTable,\n\t\tUsage: \"cbt deletetable <table>\",\n\t},\n\t{\n\t\tName:  \"doc\",\n\t\tDesc:  \"Print documentation for cbt\",\n\t\tdo:    doDoc,\n\t\tUsage: \"cbt doc\",\n\t},\n\t{\n\t\tName:  \"help\",\n\t\tDesc:  \"Print help text\",\n\t\tdo:    doHelp,\n\t\tUsage: \"cbt help 
[command]\",\n\t},\n\t{\n\t\tName:  \"lookup\",\n\t\tDesc:  \"Read from a single row\",\n\t\tdo:    doLookup,\n\t\tUsage: \"cbt lookup <table> <row>\",\n\t},\n\t{\n\t\tName: \"ls\",\n\t\tDesc: \"List tables and column families\",\n\t\tdo:   doLS,\n\t\tUsage: \"cbt ls\t\t\tList tables\\n\" +\n\t\t\t\"cbt ls <table>\t\tList column families in <table>\",\n\t},\n\t{\n\t\tName: \"read\",\n\t\tDesc: \"Read rows\",\n\t\tdo:   doRead,\n\t\tUsage: \"cbt read <table> [start=<row>] [limit=<row>] [prefix=<prefix>]\\n\" +\n\t\t\t\"  start=<row>\t\tStart reading at this row\\n\" +\n\t\t\t\"  limit=<row>\t\tStop reading before this row\\n\" +\n\t\t\t\"  prefix=<prefix>\tRead rows with this prefix\\n\",\n\t},\n\t{\n\t\tName: \"set\",\n\t\tDesc: \"Set value of a cell\",\n\t\tdo:   doSet,\n\t\tUsage: \"cbt set <table> <row> family:column=val[@ts] ...\\n\" +\n\t\t\t\"  family:column=val[@ts] may be repeated to set multiple cells.\\n\" +\n\t\t\t\"\\n\" +\n\t\t\t\"  ts is an optional integer timestamp.\\n\" +\n\t\t\t\"  If it cannot be parsed, the `@ts` part will be\\n\" +\n\t\t\t\"  interpreted as part of the value.\",\n\t},\n\t{\n\t\tName:  \"setclustersize\",\n\t\tDesc:  \"Set size of a cluster\",\n\t\tdo:    doSetClusterSize,\n\t\tUsage: \"cbt setclustersize <num_nodes>\",\n\t},\n}\n\nfunc doCount(ctx context.Context, args ...string) {\n\tif len(args) != 1 {\n\t\tlog.Fatal(\"usage: cbt count <table>\")\n\t}\n\ttbl := getClient().Open(args[0])\n\n\tn := 0\n\terr := tbl.ReadRows(ctx, bigtable.InfiniteRange(\"\"), func(_ bigtable.Row) bool {\n\t\tn++\n\t\treturn true\n\t}, bigtable.RowFilter(bigtable.StripValueFilter()))\n\tif err != nil {\n\t\tlog.Fatalf(\"Reading rows: %v\", err)\n\t}\n\tfmt.Println(n)\n}\n\nfunc doCreateFamily(ctx context.Context, args ...string) {\n\tif len(args) != 2 {\n\t\tlog.Fatal(\"usage: cbt createfamily <table> <family>\")\n\t}\n\terr := getAdminClient().CreateColumnFamily(ctx, args[0], args[1])\n\tif err != nil {\n\t\tlog.Fatalf(\"Creating column family: 
%v\", err)\n\t}\n}\n\nfunc doCreateTable(ctx context.Context, args ...string) {\n\tif len(args) != 1 {\n\t\tlog.Fatal(\"usage: cbt createtable <table>\")\n\t}\n\terr := getAdminClient().CreateTable(ctx, args[0])\n\tif err != nil {\n\t\tlog.Fatalf(\"Creating table: %v\", err)\n\t}\n}\n\nfunc doDeleteFamily(ctx context.Context, args ...string) {\n\tif len(args) != 2 {\n\t\tlog.Fatal(\"usage: cbt deletefamily <table> <family>\")\n\t}\n\terr := getAdminClient().DeleteColumnFamily(ctx, args[0], args[1])\n\tif err != nil {\n\t\tlog.Fatalf(\"Deleting column family: %v\", err)\n\t}\n}\n\nfunc doDeleteRow(ctx context.Context, args ...string) {\n\tif len(args) != 2 {\n\t\tlog.Fatal(\"usage: cbt deleterow <table> <row>\")\n\t}\n\ttbl := getClient().Open(args[0])\n\tmut := bigtable.NewMutation()\n\tmut.DeleteRow()\n\tif err := tbl.Apply(ctx, args[1], mut); err != nil {\n\t\tlog.Fatalf(\"Deleting row: %v\", err)\n\t}\n}\n\nfunc doDeleteTable(ctx context.Context, args ...string) {\n\tif len(args) != 1 {\n\t\tlog.Fatalf(\"Can't do `cbt deletetable %s`\", args)\n\t}\n\terr := getAdminClient().DeleteTable(ctx, args[0])\n\tif err != nil {\n\t\tlog.Fatalf(\"Deleting table: %v\", err)\n\t}\n}\n\n// to break circular dependencies\nvar (\n\tdoDocFn  func(ctx context.Context, args ...string)\n\tdoHelpFn func(ctx context.Context, args ...string)\n)\n\nfunc init() {\n\tdoDocFn = doDocReal\n\tdoHelpFn = doHelpReal\n}\n\nfunc doDoc(ctx context.Context, args ...string)  { doDocFn(ctx, args...) }\nfunc doHelp(ctx context.Context, args ...string) { doHelpFn(ctx, args...) 
}\n\nfunc doDocReal(ctx context.Context, args ...string) {\n\tdata := map[string]interface{}{\n\t\t\"Commands\": commands,\n\t}\n\tvar buf bytes.Buffer\n\tif err := docTemplate.Execute(&buf, data); err != nil {\n\t\tlog.Fatalf(\"Bad doc template: %v\", err)\n\t}\n\tout, err := format.Source(buf.Bytes())\n\tif err != nil {\n\t\tlog.Fatalf(\"Bad doc output: %v\", err)\n\t}\n\tos.Stdout.Write(out)\n}\n\nvar docTemplate = template.Must(template.New(\"doc\").Funcs(template.FuncMap{\n\t\"indent\": func(s, ind string) string {\n\t\tss := strings.Split(s, \"\\n\")\n\t\tfor i, p := range ss {\n\t\t\tss[i] = ind + p\n\t\t}\n\t\treturn strings.Join(ss, \"\\n\")\n\t},\n}).\n\tParse(`\n// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.\n// Run \"go generate\" to regenerate.\n//go:generate go run cbt.go -o cbtdoc.go doc\n\n/*\nCbt is a tool for doing basic interactions with Cloud Bigtable.\n\nUsage:\n\n\tcbt [options] command [arguments]\n\nThe commands are:\n{{range .Commands}}\n\t{{printf \"%-25s %s\" .Name .Desc}}{{end}}\n\nUse \"cbt help <command>\" for more information about a command.\n\n{{range .Commands}}\n{{.Desc}}\n\nUsage:\n{{indent .Usage \"\\t\"}}\n\n\n\n{{end}}\n*/\npackage main\n`))\n\nfunc doHelpReal(ctx context.Context, args ...string) {\n\tif len(args) == 0 {\n\t\tfmt.Print(cmdSummary)\n\t\treturn\n\t}\n\tfor _, cmd := range commands {\n\t\tif cmd.Name == args[0] {\n\t\t\tfmt.Println(cmd.Usage)\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Fatalf(\"Don't know command %q\", args[0])\n}\n\nfunc doLookup(ctx context.Context, args ...string) {\n\tif len(args) != 2 {\n\t\tlog.Fatalf(\"usage: cbt lookup <table> <row>\")\n\t}\n\ttable, row := args[0], args[1]\n\ttbl := getClient().Open(table)\n\tr, err := tbl.ReadRow(ctx, row)\n\tif err != nil {\n\t\tlog.Fatalf(\"Reading row: %v\", err)\n\t}\n\tprintRow(r)\n}\n\nfunc printRow(r bigtable.Row) {\n\tfmt.Println(strings.Repeat(\"-\", 40))\n\tfmt.Println(r.Key())\n\n\tvar fams []string\n\tfor fam := range r {\n\t\tfams = append(fams, 
fam)\n\t}\n\tsort.Strings(fams)\n\tfor _, fam := range fams {\n\t\tris := r[fam]\n\t\tsort.Sort(byColumn(ris))\n\t\tfor _, ri := range ris {\n\t\t\tts := time.Unix(0, int64(ri.Timestamp)*1e3)\n\t\t\tfmt.Printf(\"  %-40s @ %s\\n\", ri.Column, ts.Format(\"2006/01/02-15:04:05.000000\"))\n\t\t\tfmt.Printf(\"    %q\\n\", ri.Value)\n\t\t}\n\t}\n}\n\ntype byColumn []bigtable.ReadItem\n\nfunc (b byColumn) Len() int           { return len(b) }\nfunc (b byColumn) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }\nfunc (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column }\n\nfunc doLS(ctx context.Context, args ...string) {\n\tswitch len(args) {\n\tdefault:\n\t\tlog.Fatalf(\"Can't do `cbt ls %s`\", args)\n\tcase 0:\n\t\ttables, err := getAdminClient().Tables(ctx)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Getting list of tables: %v\", err)\n\t\t}\n\t\tsort.Strings(tables)\n\t\tfor _, table := range tables {\n\t\t\tfmt.Println(table)\n\t\t}\n\tcase 1:\n\t\ttable := args[0]\n\t\tti, err := getAdminClient().TableInfo(ctx, table)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Getting table info: %v\", err)\n\t\t}\n\t\tsort.Strings(ti.Families)\n\t\tfor _, fam := range ti.Families {\n\t\t\tfmt.Println(fam)\n\t\t}\n\t}\n}\n\nfunc doRead(ctx context.Context, args ...string) {\n\tif len(args) < 1 {\n\t\tlog.Fatalf(\"usage: cbt read <table> [args ...]\")\n\t}\n\ttbl := getClient().Open(args[0])\n\n\tparsed := make(map[string]string)\n\tfor _, arg := range args[1:] {\n\t\ti := strings.Index(arg, \"=\")\n\t\tif i < 0 {\n\t\t\tlog.Fatalf(\"Bad arg %q\", arg)\n\t\t}\n\t\tkey, val := arg[:i], arg[i+1:]\n\t\tswitch key {\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unknown arg key %q\", key)\n\t\tcase \"start\", \"limit\", \"prefix\":\n\t\t\tparsed[key] = val\n\t\t}\n\t}\n\tif (parsed[\"start\"] != \"\" || parsed[\"limit\"] != \"\") && parsed[\"prefix\"] != \"\" {\n\t\tlog.Fatal(`\"start\"/\"limit\" may not be mixed with \"prefix\"`)\n\t}\n\n\tvar rr bigtable.RowRange\n\tif start, limit := 
parsed[\"start\"], parsed[\"limit\"]; limit != \"\" {\n\t\trr = bigtable.NewRange(start, limit)\n\t} else if start != \"\" {\n\t\trr = bigtable.InfiniteRange(start)\n\t}\n\tif prefix := parsed[\"prefix\"]; prefix != \"\" {\n\t\trr = bigtable.PrefixRange(prefix)\n\t}\n\n\t// TODO(dsymonds): Support filters.\n\terr := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {\n\t\tprintRow(r)\n\t\treturn true\n\t})\n\tif err != nil {\n\t\tlog.Fatalf(\"Reading rows: %v\", err)\n\t}\n}\n\nvar setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`)\n\nfunc doSet(ctx context.Context, args ...string) {\n\tif len(args) < 3 {\n\t\tlog.Fatalf(\"usage: cbt set <table> <row> family:[column]=val[@ts] ...\")\n\t}\n\ttbl := getClient().Open(args[0])\n\trow := args[1]\n\tmut := bigtable.NewMutation()\n\tfor _, arg := range args[2:] {\n\t\tm := setArg.FindStringSubmatch(arg)\n\t\tif m == nil {\n\t\t\tlog.Fatalf(\"Bad set arg %q\", arg)\n\t\t}\n\t\tval := m[3]\n\t\tts := bigtable.Now()\n\t\tif i := strings.LastIndex(val, \"@\"); i >= 0 {\n\t\t\t// Try parsing a timestamp.\n\t\t\tn, err := strconv.ParseInt(val[i+1:], 0, 64)\n\t\t\tif err == nil {\n\t\t\t\tval = val[:i]\n\t\t\t\tts = bigtable.Timestamp(n)\n\t\t\t}\n\t\t}\n\t\tmut.Set(m[1], m[2], ts, []byte(val))\n\t}\n\tif err := tbl.Apply(ctx, row, mut); err != nil {\n\t\tlog.Fatalf(\"Applying mutation: %v\", err)\n\t}\n}\n\nfunc doSetClusterSize(ctx context.Context, args ...string) {\n\tif len(args) != 1 {\n\t\tlog.Fatalf(\"usage: cbt setclustersize <num_nodes>\")\n\t}\n\tn, err := strconv.ParseInt(args[0], 0, 32)\n\tif err != nil {\n\t\tlog.Fatalf(\"Bad num_nodes value %q: %v\", args[0], err)\n\t}\n\tif err := getAdminClient().SetClusterSize(ctx, int(n)); err != nil {\n\t\tlog.Fatalf(\"Setting cluster size: %v\", err)\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/cmd/cbt/cbtdoc.go",
    "content": "// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.\n// Run \"go generate\" to regenerate.\n//go:generate go run cbt.go -o cbtdoc.go doc\n\n/*\nCbt is a tool for doing basic interactions with Cloud Bigtable.\n\nUsage:\n\n\tcbt [options] command [arguments]\n\nThe commands are:\n\n\tcount                     Count rows in a table\n\tcreatefamily              Create a column family\n\tcreatetable               Create a table\n\tdeletefamily              Delete a column family\n\tdeleterow                 Delete a row\n\tdeletetable               Delete a table\n\tdoc                       Print documentation for cbt\n\thelp                      Print help text\n\tlookup                    Read from a single row\n\tls                        List tables and column families\n\tread                      Read rows\n\tset                       Set value of a cell\n\tsetclustersize            Set size of a cluster\n\nUse \"cbt help <command>\" for more information about a command.\n\n\nCount rows in a table\n\nUsage:\n\tcbt count <table>\n\n\n\n\nCreate a column family\n\nUsage:\n\tcbt createfamily <table> <family>\n\n\n\n\nCreate a table\n\nUsage:\n\tcbt createtable <table>\n\n\n\n\nDelete a column family\n\nUsage:\n\tcbt deletefamily <table> <family>\n\n\n\n\nDelete a row\n\nUsage:\n\tcbt deleterow <table> <row>\n\n\n\n\nDelete a table\n\nUsage:\n\tcbt deletetable <table>\n\n\n\n\nPrint documentation for cbt\n\nUsage:\n\tcbt doc\n\n\n\n\nPrint help text\n\nUsage:\n\tcbt help [command]\n\n\n\n\nRead from a single row\n\nUsage:\n\tcbt lookup <table> <row>\n\n\n\n\nList tables and column families\n\nUsage:\n\tcbt ls\t\t\tList tables\n\tcbt ls <table>\t\tList column families in <table>\n\n\n\n\nRead rows\n\nUsage:\n\tcbt read <table> [start=<row>] [limit=<row>] [prefix=<prefix>]\n\t  start=<row>\t\tStart reading at this row\n\t  limit=<row>\t\tStop reading before this row\n\t  prefix=<prefix>\tRead rows with this prefix\n\n\n\n\n\nSet value of a 
cell\n\nUsage:\n\tcbt set <table> <row> family:column=val[@ts] ...\n\t  family:column=val[@ts] may be repeated to set multiple cells.\n\n\t  ts is an optional integer timestamp.\n\t  If it cannot be parsed, the `@ts` part will be\n\t  interpreted as part of the value.\n\n\n\n\nSet size of a cluster\n\nUsage:\n\tcbt setclustersize <num_nodes>\n\n\n\n\n*/\npackage main\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/doc.go",
    "content": "/*\nCopyright 2015 Google Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n/*\nPackage bigtable is an API to Google Cloud Bigtable.\n\nSee https://cloud.google.com/bigtable/docs/ for general product documentation.\n\nSetup and Credentials\n\nUse NewClient or NewAdminClient to create a client that can be used to access\nthe data or admin APIs respectively. Both require credentials that have permission\nto access the Cloud Bigtable API.\n\nIf your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials\n(https://developers.google.com/accounts/docs/application-default-credentials)\nis the simplest option. 
Those credentials will be used by default when NewClient or NewAdminClient are called.\n\nTo use alternate credentials, pass them to NewClient or NewAdminClient using cloud.WithTokenSource.\nFor instance, you can use service account credentials by visiting\nhttps://cloud.google.com/console/project/MYPROJECT/apiui/credential,\ncreating a new OAuth \"Client ID\", storing the JSON key somewhere accessible, and writing\n\tjsonKey, err := ioutil.ReadFile(pathToKeyFile)\n\t...\n\tconfig, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc.\n\t...\n\tclient, err := bigtable.NewClient(ctx, project, zone, cluster, cloud.WithTokenSource(config.TokenSource()))\n\t...\nHere, `google` means the golang.org/x/oauth2/google package\nand `cloud` means the google.golang.org/cloud package.\n\nReading\n\nThe principal way to read from a Bigtable is to use the ReadRows method on *Table.\nA RowRange specifies a contiguous portion of a table. A Filter may be provided through\nRowFilter to limit or transform the data that is returned.\n\ttbl := client.Open(\"mytable\")\n\t...\n\t// Read all the rows starting with \"com.google.\",\n\t// but only fetch the columns in the \"links\" family.\n\trr := bigtable.PrefixRange(\"com.google.\")\n\terr := tbl.ReadRows(ctx, rr, func(r Row) bool {\n\t\t// do something with r\n\t\treturn true // keep going\n\t}, bigtable.RowFilter(bigtable.FamilyFilter(\"links\")))\n\t...\n\nTo read a single row, use the ReadRow helper method.\n\tr, err := tbl.ReadRow(ctx, \"com.google.cloud\") // \"com.google.cloud\" is the entire row key\n\t...\n\nWriting\n\nThis API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite.\nThe former expresses idempotent operations.\nThe latter expresses non-idempotent operations and returns the new values of updated cells.\nThese operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite),\nbuilding up one or more 
operations on that, and then using the Apply or ApplyReadModifyWrite\nmethods on a Table.\n\nFor instance, to set a couple of cells in a table,\n\ttbl := client.Open(\"mytable\")\n\tmut := bigtable.NewMutation()\n\tmut.Set(\"links\", \"maps.google.com\", bigtable.Now(), []byte(\"1\"))\n\tmut.Set(\"links\", \"golang.org\", bigtable.Now(), []byte(\"1\"))\n\terr := tbl.Apply(ctx, \"com.google.cloud\", mut)\n\t...\n\nTo increment an encoded value in one cell,\n\ttbl := client.Open(\"mytable\")\n\trmw := bigtable.NewReadModifyWrite()\n\trmw.Increment(\"links\", \"golang.org\", 12) // add 12 to the cell in column \"links:golang.org\"\n\tr, err := tbl.ApplyReadModifyWrite(ctx, \"com.google.cloud\", rmw)\n\t...\n*/\npackage bigtable\n\n// Scope constants for authentication credentials.\n// These should be used when using credential creation functions such as credentials.NewServiceAccountFromFile.\nconst (\n\t// Scope is the OAuth scope for Cloud Bigtable data operations.\n\tScope = \"https://www.googleapis.com/auth/bigtable.data\"\n\t// ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations.\n\tReadonlyScope = \"https://www.googleapis.com/auth/bigtable.readonly\"\n\n\t// AdminScope is the OAuth scope for Cloud Bigtable admin operations.\n\tAdminScope = \"https://www.googleapis.com/auth/bigtable.admin\"\n)\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/filter.go",
    "content": "/*\nCopyright 2015 Google Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage bigtable\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\tbtdpb \"google.golang.org/cloud/bigtable/internal/data_proto\"\n)\n\n// A Filter represents a row filter.\ntype Filter interface {\n\tString() string\n\tproto() *btdpb.RowFilter\n}\n\n// ChainFilters returns a filter that applies a sequence of filters.\nfunc ChainFilters(sub ...Filter) Filter { return chainFilter{sub} }\n\ntype chainFilter struct {\n\tsub []Filter\n}\n\nfunc (cf chainFilter) String() string {\n\tvar ss []string\n\tfor _, sf := range cf.sub {\n\t\tss = append(ss, sf.String())\n\t}\n\treturn \"(\" + strings.Join(ss, \" | \") + \")\"\n}\n\nfunc (cf chainFilter) proto() *btdpb.RowFilter {\n\tf := &btdpb.RowFilter{\n\t\tChain: &btdpb.RowFilter_Chain{},\n\t}\n\tfor _, sf := range cf.sub {\n\t\tf.Chain.Filters = append(f.Chain.Filters, sf.proto())\n\t}\n\treturn f\n}\n\n// InterleaveFilters returns a filter that applies a set of filters in parallel\n// and interleaves the results.\nfunc InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} }\n\ntype interleaveFilter struct {\n\tsub []Filter\n}\n\nfunc (ilf interleaveFilter) String() string {\n\tvar ss []string\n\tfor _, sf := range ilf.sub {\n\t\tss = append(ss, sf.String())\n\t}\n\treturn \"(\" + strings.Join(ss, \" + \") + \")\"\n}\n\nfunc (ilf interleaveFilter) proto() *btdpb.RowFilter {\n\tf := 
&btdpb.RowFilter{\n\t\tInterleave: &btdpb.RowFilter_Interleave{},\n\t}\n\tfor _, sf := range ilf.sub {\n\t\tf.Interleave.Filters = append(f.Interleave.Filters, sf.proto())\n\t}\n\treturn f\n}\n\n// RowKeyFilter returns a filter that matches cells from rows whose\n// key matches the provided RE2 pattern.\n// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.\nfunc RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) }\n\ntype rowKeyFilter string\n\nfunc (rkf rowKeyFilter) String() string { return fmt.Sprintf(\"row(%s)\", string(rkf)) }\n\nfunc (rkf rowKeyFilter) proto() *btdpb.RowFilter {\n\treturn &btdpb.RowFilter{RowKeyRegexFilter: []byte(rkf)}\n}\n\n// FamilyFilter returns a filter that matches cells whose family name\n// matches the provided RE2 pattern.\n// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.\nfunc FamilyFilter(pattern string) Filter { return familyFilter(pattern) }\n\ntype familyFilter string\n\nfunc (ff familyFilter) String() string { return fmt.Sprintf(\"col(%s:)\", string(ff)) }\n\nfunc (ff familyFilter) proto() *btdpb.RowFilter {\n\treturn &btdpb.RowFilter{FamilyNameRegexFilter: string(ff)}\n}\n\n// ColumnFilter returns a filter that matches cells whose column name\n// matches the provided RE2 pattern.\n// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.\nfunc ColumnFilter(pattern string) Filter { return columnFilter(pattern) }\n\ntype columnFilter string\n\nfunc (cf columnFilter) String() string { return fmt.Sprintf(\"col(.*:%s)\", string(cf)) }\n\nfunc (cf columnFilter) proto() *btdpb.RowFilter {\n\treturn &btdpb.RowFilter{ColumnQualifierRegexFilter: []byte(cf)}\n}\n\n// ValueFilter returns a filter that matches cells whose value\n// matches the provided RE2 pattern.\n// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.\nfunc ValueFilter(pattern string) Filter { return valueFilter(pattern) }\n\ntype valueFilter string\n\nfunc (vf valueFilter) 
String() string { return fmt.Sprintf(\"value_match(%s)\", string(vf)) }\n\nfunc (vf valueFilter) proto() *btdpb.RowFilter {\n\treturn &btdpb.RowFilter{ValueRegexFilter: []byte(vf)}\n}\n\n// LatestNFilter returns a filter that matches the most recent N cells in each column.\nfunc LatestNFilter(n int) Filter { return latestNFilter(n) }\n\ntype latestNFilter int32\n\nfunc (lnf latestNFilter) String() string { return fmt.Sprintf(\"col(*,%d)\", lnf) }\n\nfunc (lnf latestNFilter) proto() *btdpb.RowFilter {\n\treturn &btdpb.RowFilter{CellsPerColumnLimitFilter: int32(lnf)}\n}\n\n// StripValueFilter returns a filter that replaces each value with the empty string.\nfunc StripValueFilter() Filter { return stripValueFilter{} }\n\ntype stripValueFilter struct{}\n\nfunc (stripValueFilter) String() string          { return \"strip_value()\" }\nfunc (stripValueFilter) proto() *btdpb.RowFilter { return &btdpb.RowFilter{StripValueTransformer: true} }\n\n// TODO(dsymonds): More filters: cond, col/ts/value range, sampling\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto\n// DO NOT EDIT!\n\n/*\nPackage google_bigtable_admin_cluster_v1 is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto\n\nIt has these top-level messages:\n\tZone\n\tCluster\n*/\npackage google_bigtable_admin_cluster_v1\n\nimport proto \"github.com/golang/protobuf/proto\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\n\n// Possible states of a zone.\ntype Zone_Status int32\n\nconst (\n\t// The state of the zone is unknown or unspecified.\n\tZone_UNKNOWN Zone_Status = 0\n\t// The zone is in a good state.\n\tZone_OK Zone_Status = 1\n\t// The zone is down for planned maintenance.\n\tZone_PLANNED_MAINTENANCE Zone_Status = 2\n\t// The zone is down for emergency or unplanned maintenance.\n\tZone_EMERGENCY_MAINENANCE Zone_Status = 3\n)\n\nvar Zone_Status_name = map[int32]string{\n\t0: \"UNKNOWN\",\n\t1: \"OK\",\n\t2: \"PLANNED_MAINTENANCE\",\n\t3: \"EMERGENCY_MAINENANCE\",\n}\nvar Zone_Status_value = map[string]int32{\n\t\"UNKNOWN\": 0,\n\t\"OK\":      1,\n\t\"PLANNED_MAINTENANCE\":  2,\n\t\"EMERGENCY_MAINENANCE\": 3,\n}\n\nfunc (x Zone_Status) String() string {\n\treturn proto.EnumName(Zone_Status_name, int32(x))\n}\n\n// A physical location in which a particular project can allocate Cloud BigTable\n// resources.\ntype Zone struct {\n\t// A permanent unique identifier for the zone.\n\t// Values are of the form projects/<project>/zones/[a-z][-a-z0-9]*\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\t// The name of this zone as it appears in UIs.\n\tDisplayName string `protobuf:\"bytes,2,opt,name=display_name\" json:\"display_name,omitempty\"`\n\t// The current state of this zone.\n\tStatus Zone_Status 
`protobuf:\"varint,3,opt,name=status,enum=google.bigtable.admin.cluster.v1.Zone_Status\" json:\"status,omitempty\"`\n}\n\nfunc (m *Zone) Reset()         { *m = Zone{} }\nfunc (m *Zone) String() string { return proto.CompactTextString(m) }\nfunc (*Zone) ProtoMessage()    {}\n\n// An isolated set of Cloud BigTable resources on which tables can be hosted.\ntype Cluster struct {\n\t// A permanent unique identifier for the cluster. For technical reasons, the\n\t// zone in which the cluster resides is included here.\n\t// Values are of the form\n\t// projects/<project>/zones/<zone>/clusters/[a-z][-a-z0-9]*\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\t// The descriptive name for this cluster as it appears in UIs.\n\t// Must be unique per zone.\n\tDisplayName string `protobuf:\"bytes,4,opt,name=display_name\" json:\"display_name,omitempty\"`\n\t// The number of serve nodes allocated to this cluster.\n\tServeNodes int32 `protobuf:\"varint,5,opt,name=serve_nodes\" json:\"serve_nodes,omitempty\"`\n\t// The maximum HDD storage usage allowed in this cluster, in bytes.\n\tHddBytes int64 `protobuf:\"varint,6,opt,name=hdd_bytes\" json:\"hdd_bytes,omitempty\"`\n\t// The maximum SSD storage usage allowed in this cluster, in bytes.\n\tSsdBytes int64 `protobuf:\"varint,7,opt,name=ssd_bytes\" json:\"ssd_bytes,omitempty\"`\n}\n\nfunc (m *Cluster) Reset()         { *m = Cluster{} }\nfunc (m *Cluster) String() string { return proto.CompactTextString(m) }\nfunc (*Cluster) ProtoMessage()    {}\n\nfunc init() {\n\tproto.RegisterEnum(\"google.bigtable.admin.cluster.v1.Zone_Status\", Zone_Status_name, Zone_Status_value)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto",
    "content": "// Copyright (c) 2015, Google Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nsyntax = \"proto3\";\n\npackage google.bigtable.admin.cluster.v1;\n\n\noption java_multiple_files = true;\noption java_outer_classname = \"BigtableClusterDataProto\";\noption java_package = \"com.google.bigtable.admin.cluster.v1\";\n\n\n// A physical location in which a particular project can allocate Cloud BigTable\n// resources.\nmessage Zone {\n  // Possible states of a zone.\n  enum Status {\n    // The state of the zone is unknown or unspecified.\n    UNKNOWN = 0;\n\n    // The zone is in a good state.\n    OK = 1;\n\n    // The zone is down for planned maintenance.\n    PLANNED_MAINTENANCE = 2;\n\n    // The zone is down for emergency or unplanned maintenance.\n    EMERGENCY_MAINENANCE = 3;\n  }\n\n  // A permanent unique identifier for the zone.\n  // Values are of the form projects/<project>/zones/[a-z][-a-z0-9]*\n  string name = 1;\n\n  // The name of this zone as it appears in UIs.\n  string display_name = 2;\n\n  // The current state of this zone.\n  Status status = 3;\n}\n\n// An isolated set of Cloud BigTable resources on which tables can be hosted.\nmessage Cluster {\n  // A permanent unique identifier for the cluster. 
For technical reasons, the\n  // zone in which the cluster resides is included here.\n  // Values are of the form\n  // projects/<project>/zones/<zone>/clusters/[a-z][-a-z0-9]*\n  string name = 1;\n\n  // If this cluster has been deleted, the time at which its backup will\n  // be irrevocably destroyed. Omitted otherwise.\n  // This cannot be set directly, only through DeleteCluster.\n\n  // The operation currently running on the cluster, if any.\n  // This cannot be set directly, only through CreateCluster, UpdateCluster,\n  // or UndeleteCluster. Calls to these methods will be rejected if\n  // \"current_operation\" is already set.\n\n  // The descriptive name for this cluster as it appears in UIs.\n  // Must be unique per zone.\n  string display_name = 4;\n\n  // The number of serve nodes allocated to this cluster.\n  int32 serve_nodes = 5;\n\n  // The maximum HDD storage usage allowed in this cluster, in bytes.\n  int64 hdd_bytes = 6;\n\n  // The maximum SSD storage usage allowed in this cluster, in bytes.\n  int64 ssd_bytes = 7;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto\n// DO NOT EDIT!\n\npackage google_bigtable_admin_cluster_v1\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport google_bigtable_admin_cluster_v11 \"google.golang.org/cloud/bigtable/internal/cluster_data_proto\"\nimport google_protobuf \"google.golang.org/cloud/bigtable/internal/empty\"\n\nimport (\n\tcontext \"golang.org/x/net/context\"\n\tgrpc \"google.golang.org/grpc\"\n)\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ context.Context\nvar _ grpc.ClientConn\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\n\nfunc init() {\n}\n\n// Client API for BigtableClusterService service\n\ntype BigtableClusterServiceClient interface {\n\t// Lists the supported zones for the given project.\n\tListZones(ctx context.Context, in *ListZonesRequest, opts ...grpc.CallOption) (*ListZonesResponse, error)\n\t// Gets information about a particular cluster.\n\tGetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error)\n\t// Lists all clusters in the given project, along with any zones for which\n\t// cluster information could not be retrieved.\n\tListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error)\n\t// Creates a cluster and begins preparing it to begin serving. 
The returned\n\t// cluster embeds as its \"current_operation\" a long-running operation which\n\t// can be used to track the progress of turning up the new cluster.\n\t// Immediately upon completion of this request:\n\t//  * The cluster will be readable via the API, with all requested attributes\n\t//    but no allocated resources.\n\t// Until completion of the embedded operation:\n\t//  * Cancelling the operation will render the cluster immediately unreadable\n\t//    via the API.\n\t//  * All other attempts to modify or delete the cluster will be rejected.\n\t// Upon completion of the embedded operation:\n\t//  * Billing for all successfully-allocated resources will begin (some types\n\t//    may have lower than the requested levels).\n\t//  * New tables can be created in the cluster.\n\t//  * The cluster's allocated resource levels will be readable via the API.\n\t// The embedded operation's \"metadata\" field type is\n\t// [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's \"response\" field type is\n\t// [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.\n\tCreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error)\n\t// Updates a cluster, and begins allocating or releasing resources as\n\t// requested. The returned cluster embeds as its \"current_operation\" a\n\t// long-running operation which can be used to track the progress of updating\n\t// the cluster.\n\t// Immediately upon completion of this request:\n\t//  * For resource types where a decrease in the cluster's allocation has been\n\t//    requested, billing will be based on the newly-requested level.\n\t// Until completion of the embedded operation:\n\t//  * Cancelling the operation will set its metadata's \"cancelled_at_time\",\n\t//    and begin restoring resources to their pre-request values. 
The operation\n\t//    is guaranteed to succeed at undoing all resource changes, after which\n\t//    point it will terminate with a CANCELLED status.\n\t//  * All other attempts to modify or delete the cluster will be rejected.\n\t//  * Reading the cluster via the API will continue to give the pre-request\n\t//    resource levels.\n\t// Upon completion of the embedded operation:\n\t//  * Billing will begin for all successfully-allocated resources (some types\n\t//    may have lower than the requested levels).\n\t//  * All newly-reserved resources will be available for serving the cluster's\n\t//    tables.\n\t//  * The cluster's new resource levels will be readable via the API.\n\t// [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's \"response\" field type is\n\t// [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.\n\tUpdateCluster(ctx context.Context, in *google_bigtable_admin_cluster_v11.Cluster, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error)\n\t// Marks a cluster and all of its tables for permanent deletion in 7 days.\n\t// Immediately upon completion of the request:\n\t//  * Billing will cease for all of the cluster's reserved resources.\n\t//  * The cluster's \"delete_time\" field will be set 7 days in the future.\n\t// Soon afterward:\n\t//  * All tables within the cluster will become unavailable.\n\t// Prior to the cluster's \"delete_time\":\n\t//  * The cluster can be recovered with a call to UndeleteCluster.\n\t//  * All other attempts to modify or delete the cluster will be rejected.\n\t// At the cluster's \"delete_time\":\n\t//  * The cluster and *all of its tables* will immediately and irrevocably\n\t//    disappear from the API, and their data will be permanently deleted.\n\tDeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)\n}\n\ntype bigtableClusterServiceClient struct {\n\tcc 
*grpc.ClientConn\n}\n\nfunc NewBigtableClusterServiceClient(cc *grpc.ClientConn) BigtableClusterServiceClient {\n\treturn &bigtableClusterServiceClient{cc}\n}\n\nfunc (c *bigtableClusterServiceClient) ListZones(ctx context.Context, in *ListZonesRequest, opts ...grpc.CallOption) (*ListZonesResponse, error) {\n\tout := new(ListZonesResponse)\n\terr := grpc.Invoke(ctx, \"/google.bigtable.admin.cluster.v1.BigtableClusterService/ListZones\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *bigtableClusterServiceClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) {\n\tout := new(google_bigtable_admin_cluster_v11.Cluster)\n\terr := grpc.Invoke(ctx, \"/google.bigtable.admin.cluster.v1.BigtableClusterService/GetCluster\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *bigtableClusterServiceClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) {\n\tout := new(ListClustersResponse)\n\terr := grpc.Invoke(ctx, \"/google.bigtable.admin.cluster.v1.BigtableClusterService/ListClusters\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *bigtableClusterServiceClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) {\n\tout := new(google_bigtable_admin_cluster_v11.Cluster)\n\terr := grpc.Invoke(ctx, \"/google.bigtable.admin.cluster.v1.BigtableClusterService/CreateCluster\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *bigtableClusterServiceClient) UpdateCluster(ctx context.Context, in *google_bigtable_admin_cluster_v11.Cluster, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) {\n\tout := 
new(google_bigtable_admin_cluster_v11.Cluster)\n\terr := grpc.Invoke(ctx, \"/google.bigtable.admin.cluster.v1.BigtableClusterService/UpdateCluster\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *bigtableClusterServiceClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {\n\tout := new(google_protobuf.Empty)\n\terr := grpc.Invoke(ctx, \"/google.bigtable.admin.cluster.v1.BigtableClusterService/DeleteCluster\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\n// Server API for BigtableClusterService service\n\ntype BigtableClusterServiceServer interface {\n\t// Lists the supported zones for the given project.\n\tListZones(context.Context, *ListZonesRequest) (*ListZonesResponse, error)\n\t// Gets information about a particular cluster.\n\tGetCluster(context.Context, *GetClusterRequest) (*google_bigtable_admin_cluster_v11.Cluster, error)\n\t// Lists all clusters in the given project, along with any zones for which\n\t// cluster information could not be retrieved.\n\tListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)\n\t// Creates a cluster and begins preparing it to begin serving. 
The returned\n\t// cluster embeds as its \"current_operation\" a long-running operation which\n\t// can be used to track the progress of turning up the new cluster.\n\t// Immediately upon completion of this request:\n\t//  * The cluster will be readable via the API, with all requested attributes\n\t//    but no allocated resources.\n\t// Until completion of the embedded operation:\n\t//  * Cancelling the operation will render the cluster immediately unreadable\n\t//    via the API.\n\t//  * All other attempts to modify or delete the cluster will be rejected.\n\t// Upon completion of the embedded operation:\n\t//  * Billing for all successfully-allocated resources will begin (some types\n\t//    may have lower than the requested levels).\n\t//  * New tables can be created in the cluster.\n\t//  * The cluster's allocated resource levels will be readable via the API.\n\t// The embedded operation's \"metadata\" field type is\n\t// [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's \"response\" field type is\n\t// [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.\n\tCreateCluster(context.Context, *CreateClusterRequest) (*google_bigtable_admin_cluster_v11.Cluster, error)\n\t// Updates a cluster, and begins allocating or releasing resources as\n\t// requested. The returned cluster embeds as its \"current_operation\" a\n\t// long-running operation which can be used to track the progress of updating\n\t// the cluster.\n\t// Immediately upon completion of this request:\n\t//  * For resource types where a decrease in the cluster's allocation has been\n\t//    requested, billing will be based on the newly-requested level.\n\t// Until completion of the embedded operation:\n\t//  * Cancelling the operation will set its metadata's \"cancelled_at_time\",\n\t//    and begin restoring resources to their pre-request values. 
The operation\n\t//    is guaranteed to succeed at undoing all resource changes, after which\n\t//    point it will terminate with a CANCELLED status.\n\t//  * All other attempts to modify or delete the cluster will be rejected.\n\t//  * Reading the cluster via the API will continue to give the pre-request\n\t//    resource levels.\n\t// Upon completion of the embedded operation:\n\t//  * Billing will begin for all successfully-allocated resources (some types\n\t//    may have lower than the requested levels).\n\t//  * All newly-reserved resources will be available for serving the cluster's\n\t//    tables.\n\t//  * The cluster's new resource levels will be readable via the API.\n\t// [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's \"response\" field type is\n\t// [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.\n\tUpdateCluster(context.Context, *google_bigtable_admin_cluster_v11.Cluster) (*google_bigtable_admin_cluster_v11.Cluster, error)\n\t// Marks a cluster and all of its tables for permanent deletion in 7 days.\n\t// Immediately upon completion of the request:\n\t//  * Billing will cease for all of the cluster's reserved resources.\n\t//  * The cluster's \"delete_time\" field will be set 7 days in the future.\n\t// Soon afterward:\n\t//  * All tables within the cluster will become unavailable.\n\t// Prior to the cluster's \"delete_time\":\n\t//  * The cluster can be recovered with a call to UndeleteCluster.\n\t//  * All other attempts to modify or delete the cluster will be rejected.\n\t// At the cluster's \"delete_time\":\n\t//  * The cluster and *all of its tables* will immediately and irrevocably\n\t//    disappear from the API, and their data will be permanently deleted.\n\tDeleteCluster(context.Context, *DeleteClusterRequest) (*google_protobuf.Empty, error)\n}\n\nfunc RegisterBigtableClusterServiceServer(s *grpc.Server, srv BigtableClusterServiceServer) 
{\n\ts.RegisterService(&_BigtableClusterService_serviceDesc, srv)\n}\n\nfunc _BigtableClusterService_ListZones_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(ListZonesRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableClusterServiceServer).ListZones(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _BigtableClusterService_GetCluster_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(GetClusterRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableClusterServiceServer).GetCluster(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _BigtableClusterService_ListClusters_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(ListClustersRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableClusterServiceServer).ListClusters(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _BigtableClusterService_CreateCluster_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(CreateClusterRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableClusterServiceServer).CreateCluster(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _BigtableClusterService_UpdateCluster_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(google_bigtable_admin_cluster_v11.Cluster)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableClusterServiceServer).UpdateCluster(ctx, in)\n\tif err 
!= nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _BigtableClusterService_DeleteCluster_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(DeleteClusterRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableClusterServiceServer).DeleteCluster(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nvar _BigtableClusterService_serviceDesc = grpc.ServiceDesc{\n\tServiceName: \"google.bigtable.admin.cluster.v1.BigtableClusterService\",\n\tHandlerType: (*BigtableClusterServiceServer)(nil),\n\tMethods: []grpc.MethodDesc{\n\t\t{\n\t\t\tMethodName: \"ListZones\",\n\t\t\tHandler:    _BigtableClusterService_ListZones_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"GetCluster\",\n\t\t\tHandler:    _BigtableClusterService_GetCluster_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"ListClusters\",\n\t\t\tHandler:    _BigtableClusterService_ListClusters_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"CreateCluster\",\n\t\t\tHandler:    _BigtableClusterService_CreateCluster_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"UpdateCluster\",\n\t\t\tHandler:    _BigtableClusterService_UpdateCluster_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"DeleteCluster\",\n\t\t\tHandler:    _BigtableClusterService_DeleteCluster_Handler,\n\t\t},\n\t},\n\tStreams: []grpc.StreamDesc{},\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto",
    "content": "// Copyright (c) 2015, Google Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nsyntax = \"proto3\";\n\npackage google.bigtable.admin.cluster.v1;\n\nimport \"google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto\";\nimport \"google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto\";\nimport \"google.golang.org/cloud/bigtable/internal/empty/empty.proto\";\n\noption java_multiple_files = true;\noption java_outer_classname = \"BigtableClusterServicesProto\";\noption java_package = \"com.google.bigtable.admin.cluster.v1\";\n\n\n// Service for managing zonal Cloud Bigtable resources.\nservice BigtableClusterService {\n  // Lists the supported zones for the given project.\n  rpc ListZones(ListZonesRequest) returns (ListZonesResponse) {\n  }\n\n  // Gets information about a particular cluster.\n  rpc GetCluster(GetClusterRequest) returns (Cluster) {\n  }\n\n  // Lists all clusters in the given project, along with any zones for which\n  // cluster information could not be retrieved.\n  rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) {\n  }\n\n  // Creates a cluster and begins preparing it to begin serving. 
The returned\n  // cluster embeds as its \"current_operation\" a long-running operation which\n  // can be used to track the progress of turning up the new cluster.\n  // Immediately upon completion of this request:\n  //  * The cluster will be readable via the API, with all requested attributes\n  //    but no allocated resources.\n  // Until completion of the embedded operation:\n  //  * Cancelling the operation will render the cluster immediately unreadable\n  //    via the API.\n  //  * All other attempts to modify or delete the cluster will be rejected.\n  // Upon completion of the embedded operation:\n  //  * Billing for all successfully-allocated resources will begin (some types\n  //    may have lower than the requested levels).\n  //  * New tables can be created in the cluster.\n  //  * The cluster's allocated resource levels will be readable via the API.\n  // The embedded operation's \"metadata\" field type is\n  // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's \"response\" field type is\n  // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.\n  rpc CreateCluster(CreateClusterRequest) returns (Cluster) {\n  }\n\n  // Updates a cluster, and begins allocating or releasing resources as\n  // requested. The returned cluster embeds as its \"current_operation\" a\n  // long-running operation which can be used to track the progress of updating\n  // the cluster.\n  // Immediately upon completion of this request:\n  //  * For resource types where a decrease in the cluster's allocation has been\n  //    requested, billing will be based on the newly-requested level.\n  // Until completion of the embedded operation:\n  //  * Cancelling the operation will set its metadata's \"cancelled_at_time\",\n  //    and begin restoring resources to their pre-request values. 
The operation\n  //    is guaranteed to succeed at undoing all resource changes, after which\n  //    point it will terminate with a CANCELLED status.\n  //  * All other attempts to modify or delete the cluster will be rejected.\n  //  * Reading the cluster via the API will continue to give the pre-request\n  //    resource levels.\n  // Upon completion of the embedded operation:\n  //  * Billing will begin for all successfully-allocated resources (some types\n  //    may have lower than the requested levels).\n  //  * All newly-reserved resources will be available for serving the cluster's\n  //    tables.\n  //  * The cluster's new resource levels will be readable via the API.\n  // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's \"response\" field type is\n  // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.\n  rpc UpdateCluster(Cluster) returns (Cluster) {\n  }\n\n  // Marks a cluster and all of its tables for permanent deletion in 7 days.\n  // Immediately upon completion of the request:\n  //  * Billing will cease for all of the cluster's reserved resources.\n  //  * The cluster's \"delete_time\" field will be set 7 days in the future.\n  // Soon afterward:\n  //  * All tables within the cluster will become unavailable.\n  // Prior to the cluster's \"delete_time\":\n  //  * The cluster can be recovered with a call to UndeleteCluster.\n  //  * All other attempts to modify or delete the cluster will be rejected.\n  // At the cluster's \"delete_time\":\n  //  * The cluster and *all of its tables* will immediately and irrevocably\n  //    disappear from the API, and their data will be permanently deleted.\n  rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) {\n  }\n\n  // Cancels the scheduled deletion of an cluster and begins preparing it to\n  // resume serving. 
The returned operation will also be embedded as the\n  // cluster's \"current_operation\".\n  // Immediately upon completion of this request:\n  //  * The cluster's \"delete_time\" field will be unset, protecting it from\n  //    automatic deletion.\n  // Until completion of the returned operation:\n  //  * The operation cannot be cancelled.\n  // Upon completion of the returned operation:\n  //  * Billing for the cluster's resources will resume.\n  //  * All tables within the cluster will be available.\n  // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's \"response\" field type is\n  // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto\n// DO NOT EDIT!\n\n/*\nPackage google_bigtable_admin_cluster_v1 is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto\n\tgoogle.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto\n\nIt has these top-level messages:\n\tListZonesRequest\n\tListZonesResponse\n\tGetClusterRequest\n\tListClustersRequest\n\tListClustersResponse\n\tCreateClusterRequest\n\tCreateClusterMetadata\n\tUpdateClusterMetadata\n\tDeleteClusterRequest\n\tUndeleteClusterRequest\n\tUndeleteClusterMetadata\n*/\npackage google_bigtable_admin_cluster_v1\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport google_bigtable_admin_cluster_v11 \"google.golang.org/cloud/bigtable/internal/cluster_data_proto\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\n\n// Request message for BigtableClusterService.ListZones.\ntype ListZonesRequest struct {\n\t// The unique name of the project for which a list of supported zones is\n\t// requested.\n\t// Values are of the form projects/<project>\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n}\n\nfunc (m *ListZonesRequest) Reset()         { *m = ListZonesRequest{} }\nfunc (m *ListZonesRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ListZonesRequest) ProtoMessage()    {}\n\n// Response message for BigtableClusterService.ListZones.\ntype ListZonesResponse struct {\n\t// The list of requested zones.\n\tZones []*google_bigtable_admin_cluster_v11.Zone `protobuf:\"bytes,1,rep,name=zones\" json:\"zones,omitempty\"`\n}\n\nfunc (m *ListZonesResponse) Reset()         { *m = ListZonesResponse{} }\nfunc (m *ListZonesResponse) String() string { 
return proto.CompactTextString(m) }\nfunc (*ListZonesResponse) ProtoMessage()    {}\n\nfunc (m *ListZonesResponse) GetZones() []*google_bigtable_admin_cluster_v11.Zone {\n\tif m != nil {\n\t\treturn m.Zones\n\t}\n\treturn nil\n}\n\n// Request message for BigtableClusterService.GetCluster.\ntype GetClusterRequest struct {\n\t// The unique name of the requested cluster.\n\t// Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n}\n\nfunc (m *GetClusterRequest) Reset()         { *m = GetClusterRequest{} }\nfunc (m *GetClusterRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetClusterRequest) ProtoMessage()    {}\n\n// Request message for BigtableClusterService.ListClusters.\ntype ListClustersRequest struct {\n\t// The unique name of the project for which a list of clusters is requested.\n\t// Values are of the form projects/<project>\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n}\n\nfunc (m *ListClustersRequest) Reset()         { *m = ListClustersRequest{} }\nfunc (m *ListClustersRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ListClustersRequest) ProtoMessage()    {}\n\n// Response message for BigtableClusterService.ListClusters.\ntype ListClustersResponse struct {\n\t// The list of requested Clusters.\n\tClusters []*google_bigtable_admin_cluster_v11.Cluster `protobuf:\"bytes,1,rep,name=clusters\" json:\"clusters,omitempty\"`\n\t// The zones for which clusters could not be retrieved.\n\tFailedZones []*google_bigtable_admin_cluster_v11.Zone `protobuf:\"bytes,2,rep,name=failed_zones\" json:\"failed_zones,omitempty\"`\n}\n\nfunc (m *ListClustersResponse) Reset()         { *m = ListClustersResponse{} }\nfunc (m *ListClustersResponse) String() string { return proto.CompactTextString(m) }\nfunc (*ListClustersResponse) ProtoMessage()    {}\n\nfunc (m *ListClustersResponse) GetClusters() 
[]*google_bigtable_admin_cluster_v11.Cluster {\n\tif m != nil {\n\t\treturn m.Clusters\n\t}\n\treturn nil\n}\n\nfunc (m *ListClustersResponse) GetFailedZones() []*google_bigtable_admin_cluster_v11.Zone {\n\tif m != nil {\n\t\treturn m.FailedZones\n\t}\n\treturn nil\n}\n\n// Request message for BigtableClusterService.CreateCluster.\ntype CreateClusterRequest struct {\n\t// The unique name of the zone in which to create the cluster.\n\t// Values are of the form projects/<project>/zones/<zone>\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\t// The id to be used when referring to the new cluster within its zone,\n\t// e.g. just the \"test-cluster\" section of the full name\n\t// \"projects/<project>/zones/<zone>/clusters/test-cluster\".\n\tClusterId string `protobuf:\"bytes,2,opt,name=cluster_id\" json:\"cluster_id,omitempty\"`\n\t// The cluster to create.\n\t// The \"name\", \"delete_time\", and \"current_operation\" fields must be left\n\t// blank.\n\tCluster *google_bigtable_admin_cluster_v11.Cluster `protobuf:\"bytes,3,opt,name=cluster\" json:\"cluster,omitempty\"`\n}\n\nfunc (m *CreateClusterRequest) Reset()         { *m = CreateClusterRequest{} }\nfunc (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) }\nfunc (*CreateClusterRequest) ProtoMessage()    {}\n\nfunc (m *CreateClusterRequest) GetCluster() *google_bigtable_admin_cluster_v11.Cluster {\n\tif m != nil {\n\t\treturn m.Cluster\n\t}\n\treturn nil\n}\n\n// Metadata type for the operation returned by\n// BigtableClusterService.CreateCluster.\ntype CreateClusterMetadata struct {\n\t// The request which prompted the creation of this operation.\n\tOriginalRequest *CreateClusterRequest `protobuf:\"bytes,1,opt,name=original_request\" json:\"original_request,omitempty\"`\n}\n\nfunc (m *CreateClusterMetadata) Reset()         { *m = CreateClusterMetadata{} }\nfunc (m *CreateClusterMetadata) String() string { return proto.CompactTextString(m) }\nfunc 
(*CreateClusterMetadata) ProtoMessage()    {}\n\nfunc (m *CreateClusterMetadata) GetOriginalRequest() *CreateClusterRequest {\n\tif m != nil {\n\t\treturn m.OriginalRequest\n\t}\n\treturn nil\n}\n\n// Metadata type for the operation returned by\n// BigtableClusterService.UpdateCluster.\ntype UpdateClusterMetadata struct {\n\t// The request which prompted the creation of this operation.\n\tOriginalRequest *google_bigtable_admin_cluster_v11.Cluster `protobuf:\"bytes,1,opt,name=original_request\" json:\"original_request,omitempty\"`\n}\n\nfunc (m *UpdateClusterMetadata) Reset()         { *m = UpdateClusterMetadata{} }\nfunc (m *UpdateClusterMetadata) String() string { return proto.CompactTextString(m) }\nfunc (*UpdateClusterMetadata) ProtoMessage()    {}\n\nfunc (m *UpdateClusterMetadata) GetOriginalRequest() *google_bigtable_admin_cluster_v11.Cluster {\n\tif m != nil {\n\t\treturn m.OriginalRequest\n\t}\n\treturn nil\n}\n\n// Request message for BigtableClusterService.DeleteCluster.\ntype DeleteClusterRequest struct {\n\t// The unique name of the cluster to be deleted.\n\t// Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n}\n\nfunc (m *DeleteClusterRequest) Reset()         { *m = DeleteClusterRequest{} }\nfunc (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) }\nfunc (*DeleteClusterRequest) ProtoMessage()    {}\n\n// Request message for BigtableClusterService.UndeleteCluster.\ntype UndeleteClusterRequest struct {\n\t// The unique name of the cluster to be un-deleted.\n\t// Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n}\n\nfunc (m *UndeleteClusterRequest) Reset()         { *m = UndeleteClusterRequest{} }\nfunc (m *UndeleteClusterRequest) String() string { return proto.CompactTextString(m) }\nfunc (*UndeleteClusterRequest) 
ProtoMessage()    {}\n\n// Metadata type for the operation returned by\n// BigtableClusterService.UndeleteCluster.\ntype UndeleteClusterMetadata struct {\n}\n\nfunc (m *UndeleteClusterMetadata) Reset()         { *m = UndeleteClusterMetadata{} }\nfunc (m *UndeleteClusterMetadata) String() string { return proto.CompactTextString(m) }\nfunc (*UndeleteClusterMetadata) ProtoMessage()    {}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto",
    "content": "// Copyright (c) 2015, Google Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nsyntax = \"proto3\";\n\npackage google.bigtable.admin.cluster.v1;\n\nimport \"google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto\";\n\noption java_multiple_files = true;\noption java_outer_classname = \"BigtableClusterServiceMessagesProto\";\noption java_package = \"com.google.bigtable.admin.cluster.v1\";\n\n\n// Request message for BigtableClusterService.ListZones.\nmessage ListZonesRequest {\n  // The unique name of the project for which a list of supported zones is\n  // requested.\n  // Values are of the form projects/<project>\n  string name = 1;\n}\n\n// Response message for BigtableClusterService.ListZones.\nmessage ListZonesResponse {\n  // The list of requested zones.\n  repeated Zone zones = 1;\n}\n\n// Request message for BigtableClusterService.GetCluster.\nmessage GetClusterRequest {\n  // The unique name of the requested cluster.\n  // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>\n  string name = 1;\n}\n\n// Request message for BigtableClusterService.ListClusters.\nmessage ListClustersRequest {\n  // The unique name of the project for which a list of clusters is requested.\n  // Values are of the form projects/<project>\n  string name = 1;\n}\n\n// Response message for BigtableClusterService.ListClusters.\nmessage ListClustersResponse {\n  // The list of requested 
Clusters.\n  repeated Cluster clusters = 1;\n\n  // The zones for which clusters could not be retrieved.\n  repeated Zone failed_zones = 2;\n}\n\n// Request message for BigtableClusterService.CreateCluster.\nmessage CreateClusterRequest {\n  // The unique name of the zone in which to create the cluster.\n  // Values are of the form projects/<project>/zones/<zone>\n  string name = 1;\n\n  // The id to be used when referring to the new cluster within its zone,\n  // e.g. just the \"test-cluster\" section of the full name\n  // \"projects/<project>/zones/<zone>/clusters/test-cluster\".\n  string cluster_id = 2;\n\n  // The cluster to create.\n  // The \"name\", \"delete_time\", and \"current_operation\" fields must be left\n  // blank.\n  Cluster cluster = 3;\n}\n\n// Metadata type for the operation returned by\n// BigtableClusterService.CreateCluster.\nmessage CreateClusterMetadata {\n  // The request which prompted the creation of this operation.\n  CreateClusterRequest original_request = 1;\n\n  // The time at which original_request was received.\n\n  // The time at which this operation failed or was completed successfully.\n}\n\n// Metadata type for the operation returned by\n// BigtableClusterService.UpdateCluster.\nmessage UpdateClusterMetadata {\n  // The request which prompted the creation of this operation.\n  Cluster original_request = 1;\n\n  // The time at which original_request was received.\n\n  // The time at which this operation was cancelled. 
If set, this operation is\n  // in the process of undoing itself (which is guaranteed to succeed) and\n  // cannot be cancelled again.\n\n  // The time at which this operation failed or was completed successfully.\n}\n\n// Request message for BigtableClusterService.DeleteCluster.\nmessage DeleteClusterRequest {\n  // The unique name of the cluster to be deleted.\n  // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>\n  string name = 1;\n}\n\n// Request message for BigtableClusterService.UndeleteCluster.\nmessage UndeleteClusterRequest {\n  // The unique name of the cluster to be un-deleted.\n  // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>\n  string name = 1;\n}\n\n// Metadata type for the operation returned by\n// BigtableClusterService.UndeleteCluster.\nmessage UndeleteClusterMetadata {\n  // The time at which the original request was received.\n\n  // The time at which this operation failed or was completed successfully.\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto\n// DO NOT EDIT!\n\n/*\nPackage google_bigtable_v1 is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto\n\nIt has these top-level messages:\n\tRow\n\tFamily\n\tColumn\n\tCell\n\tRowRange\n\tColumnRange\n\tTimestampRange\n\tValueRange\n\tRowFilter\n\tMutation\n\tReadModifyWriteRule\n*/\npackage google_bigtable_v1\n\nimport proto \"github.com/golang/protobuf/proto\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\n\n// Specifies the complete (requested) contents of a single row of a table.\n// Rows which exceed 256MiB in size cannot be read in full.\ntype Row struct {\n\t// The unique key which identifies this row within its table. This is the same\n\t// key that's used to identify the row in, for example, a MutateRowRequest.\n\t// May contain any non-empty byte string up to 16KiB in length.\n\tKey []byte `protobuf:\"bytes,1,opt,name=key,proto3\" json:\"key,omitempty\"`\n\t// May be empty, but only if the entire row is empty.\n\t// The mutual ordering of column families is not specified.\n\tFamilies []*Family `protobuf:\"bytes,2,rep,name=families\" json:\"families,omitempty\"`\n}\n\nfunc (m *Row) Reset()         { *m = Row{} }\nfunc (m *Row) String() string { return proto.CompactTextString(m) }\nfunc (*Row) ProtoMessage()    {}\n\nfunc (m *Row) GetFamilies() []*Family {\n\tif m != nil {\n\t\treturn m.Families\n\t}\n\treturn nil\n}\n\n// Specifies (some of) the contents of a single row/column family of a table.\ntype Family struct {\n\t// The unique key which identifies this family within its row. 
This is the\n\t// same key that's used to identify the family in, for example, a RowFilter\n\t// which sets its \"family_name_regex_filter\" field.\n\t// Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may\n\t// produce cells in a sentinel family with an empty name.\n\t// Must be no greater than 64 characters in length.\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\t// Must not be empty. Sorted in order of increasing \"qualifier\".\n\tColumns []*Column `protobuf:\"bytes,2,rep,name=columns\" json:\"columns,omitempty\"`\n}\n\nfunc (m *Family) Reset()         { *m = Family{} }\nfunc (m *Family) String() string { return proto.CompactTextString(m) }\nfunc (*Family) ProtoMessage()    {}\n\nfunc (m *Family) GetColumns() []*Column {\n\tif m != nil {\n\t\treturn m.Columns\n\t}\n\treturn nil\n}\n\n// Specifies (some of) the contents of a single row/column of a table.\ntype Column struct {\n\t// The unique key which identifies this column within its family. This is the\n\t// same key that's used to identify the column in, for example, a RowFilter\n\t// which sets its \"column_qualifier_regex_filter\" field.\n\t// May contain any byte string, including the empty string, up to 16kiB in\n\t// length.\n\tQualifier []byte `protobuf:\"bytes,1,opt,name=qualifier,proto3\" json:\"qualifier,omitempty\"`\n\t// Must not be empty. 
Sorted in order of decreasing \"timestamp_micros\".\n\tCells []*Cell `protobuf:\"bytes,2,rep,name=cells\" json:\"cells,omitempty\"`\n}\n\nfunc (m *Column) Reset()         { *m = Column{} }\nfunc (m *Column) String() string { return proto.CompactTextString(m) }\nfunc (*Column) ProtoMessage()    {}\n\nfunc (m *Column) GetCells() []*Cell {\n\tif m != nil {\n\t\treturn m.Cells\n\t}\n\treturn nil\n}\n\n// Specifies (some of) the contents of a single row/column/timestamp of a table.\ntype Cell struct {\n\t// The cell's stored timestamp, which also uniquely identifies it within\n\t// its column.\n\t// Values are always expressed in microseconds, but individual tables may set\n\t// a coarser \"granularity\" to further restrict the allowed values. For\n\t// example, a table which specifies millisecond granularity will only allow\n\t// values of \"timestamp_micros\" which are multiples of 1000.\n\tTimestampMicros int64 `protobuf:\"varint,1,opt,name=timestamp_micros\" json:\"timestamp_micros,omitempty\"`\n\t// The value stored in the cell.\n\t// May contain any byte string, including the empty string, up to 100MiB in\n\t// length.\n\tValue []byte `protobuf:\"bytes,2,opt,name=value,proto3\" json:\"value,omitempty\"`\n}\n\nfunc (m *Cell) Reset()         { *m = Cell{} }\nfunc (m *Cell) String() string { return proto.CompactTextString(m) }\nfunc (*Cell) ProtoMessage()    {}\n\n// Specifies a contiguous range of rows.\ntype RowRange struct {\n\t// Inclusive lower bound. If left empty, interpreted as the empty string.\n\tStartKey []byte `protobuf:\"bytes,2,opt,name=start_key,proto3\" json:\"start_key,omitempty\"`\n\t// Exclusive upper bound. 
If left empty, interpreted as infinity.\n\tEndKey []byte `protobuf:\"bytes,3,opt,name=end_key,proto3\" json:\"end_key,omitempty\"`\n}\n\nfunc (m *RowRange) Reset()         { *m = RowRange{} }\nfunc (m *RowRange) String() string { return proto.CompactTextString(m) }\nfunc (*RowRange) ProtoMessage()    {}\n\n// Specifies a contiguous range of columns within a single column family.\n// The range spans from <column_family>:<start_qualifier> to\n// <column_family>:<end_qualifier>, where both bounds can be either inclusive or\n// exclusive.\ntype ColumnRange struct {\n\t// The name of the column family within which this range falls.\n\tFamilyName string `protobuf:\"bytes,1,opt,name=family_name\" json:\"family_name,omitempty\"`\n\t// Used when giving an inclusive lower bound for the range.\n\tStartQualifierInclusive []byte `protobuf:\"bytes,2,opt,name=start_qualifier_inclusive,proto3\" json:\"start_qualifier_inclusive,omitempty\"`\n\t// Used when giving an exclusive lower bound for the range.\n\tStartQualifierExclusive []byte `protobuf:\"bytes,3,opt,name=start_qualifier_exclusive,proto3\" json:\"start_qualifier_exclusive,omitempty\"`\n\t// Used when giving an inclusive upper bound for the range.\n\tEndQualifierInclusive []byte `protobuf:\"bytes,4,opt,name=end_qualifier_inclusive,proto3\" json:\"end_qualifier_inclusive,omitempty\"`\n\t// Used when giving an exclusive upper bound for the range.\n\tEndQualifierExclusive []byte `protobuf:\"bytes,5,opt,name=end_qualifier_exclusive,proto3\" json:\"end_qualifier_exclusive,omitempty\"`\n}\n\nfunc (m *ColumnRange) Reset()         { *m = ColumnRange{} }\nfunc (m *ColumnRange) String() string { return proto.CompactTextString(m) }\nfunc (*ColumnRange) ProtoMessage()    {}\n\n// Specified a contiguous range of microsecond timestamps.\ntype TimestampRange struct {\n\t// Inclusive lower bound. 
If left empty, interpreted as 0.\n\tStartTimestampMicros int64 `protobuf:\"varint,1,opt,name=start_timestamp_micros\" json:\"start_timestamp_micros,omitempty\"`\n\t// Exclusive upper bound. If left empty, interpreted as infinity.\n\tEndTimestampMicros int64 `protobuf:\"varint,2,opt,name=end_timestamp_micros\" json:\"end_timestamp_micros,omitempty\"`\n}\n\nfunc (m *TimestampRange) Reset()         { *m = TimestampRange{} }\nfunc (m *TimestampRange) String() string { return proto.CompactTextString(m) }\nfunc (*TimestampRange) ProtoMessage()    {}\n\n// Specifies a contiguous range of raw byte values.\ntype ValueRange struct {\n\t// Used when giving an inclusive lower bound for the range.\n\tStartValueInclusive []byte `protobuf:\"bytes,1,opt,name=start_value_inclusive,proto3\" json:\"start_value_inclusive,omitempty\"`\n\t// Used when giving an exclusive lower bound for the range.\n\tStartValueExclusive []byte `protobuf:\"bytes,2,opt,name=start_value_exclusive,proto3\" json:\"start_value_exclusive,omitempty\"`\n\t// Used when giving an inclusive upper bound for the range.\n\tEndValueInclusive []byte `protobuf:\"bytes,3,opt,name=end_value_inclusive,proto3\" json:\"end_value_inclusive,omitempty\"`\n\t// Used when giving an exclusive upper bound for the range.\n\tEndValueExclusive []byte `protobuf:\"bytes,4,opt,name=end_value_exclusive,proto3\" json:\"end_value_exclusive,omitempty\"`\n}\n\nfunc (m *ValueRange) Reset()         { *m = ValueRange{} }\nfunc (m *ValueRange) String() string { return proto.CompactTextString(m) }\nfunc (*ValueRange) ProtoMessage()    {}\n\n// Takes a row as input and produces an alternate view of the row based on\n// specified rules. For example, a RowFilter might trim down a row to include\n// just the cells from columns matching a given regular expression, or might\n// return all the cells of a row but not their values. 
More complicated filters\n// can be composed out of these components to express requests such as, \"within\n// every column of a particular family, give just the two most recent cells\n// which are older than timestamp X.\"\n//\n// There are two broad categories of RowFilters (true filters and transformers),\n// as well as two ways to compose simple filters into more complex ones\n// (chains and interleaves). They work as follows:\n//\n// * True filters alter the input row by excluding some of its cells wholesale\n// from the output row. An example of a true filter is the \"value_regex_filter\",\n// which excludes cells whose values don't match the specified pattern. All\n// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax)\n// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An\n// important point to keep in mind is that RE2(.) is equivalent by default to\n// RE2([^\\n]), meaning that it does not match newlines. When attempting to match\n// an arbitrary byte, you should therefore use the escape sequence '\\C', which\n// may need to be further escaped as '\\\\C' in your client language.\n//\n// * Transformers alter the input row by changing the values of some of its\n// cells in the output, without excluding them completely. 
Currently, the only\n// supported transformer is the \"strip_value_transformer\", which replaces every\n// cell's value with the empty string.\n//\n// * Chains and interleaves are described in more detail in the\n// RowFilter.Chain and RowFilter.Interleave documentation.\n//\n// The total serialized size of a RowFilter message must not\n// exceed 4096 bytes, and RowFilters may not be nested within each other\n// (in Chains or Interleaves) to a depth of more than 20.\ntype RowFilter struct {\n\t// Applies several RowFilters to the data in sequence, progressively\n\t// narrowing the results.\n\tChain *RowFilter_Chain `protobuf:\"bytes,1,opt,name=chain\" json:\"chain,omitempty\"`\n\t// Applies several RowFilters to the data in parallel and combines the\n\t// results.\n\tInterleave *RowFilter_Interleave `protobuf:\"bytes,2,opt,name=interleave\" json:\"interleave,omitempty\"`\n\t// Applies one of two possible RowFilters to the data based on the output of\n\t// a predicate RowFilter.\n\tCondition *RowFilter_Condition `protobuf:\"bytes,3,opt,name=condition\" json:\"condition,omitempty\"`\n\t// Matches only cells from rows whose keys satisfy the given RE2 regex. In\n\t// other words, passes through the entire row when the key matches, and\n\t// otherwise produces an empty row.\n\t// Note that, since row keys can contain arbitrary bytes, the '\\C' escape\n\t// sequence must be used if a true wildcard is desired. The '.' character\n\t// will not match the new line character '\\n', which may be present in a\n\t// binary key.\n\tRowKeyRegexFilter []byte `protobuf:\"bytes,4,opt,name=row_key_regex_filter,proto3\" json:\"row_key_regex_filter,omitempty\"`\n\t// Matches all cells from a row with probability p, and matches no cells\n\t// from the row with probability 1-p.\n\tRowSampleFilter float64 `protobuf:\"fixed64,14,opt,name=row_sample_filter\" json:\"row_sample_filter,omitempty\"`\n\t// Matches only cells from columns whose families satisfy the given RE2\n\t// regex. 
For technical reasons, the regex must not contain the ':'\n\t// character, even if it is not being used as a literal.\n\t// Note that, since column families cannot contain the new line character\n\t// '\\n', it is sufficient to use '.' as a full wildcard when matching\n\t// column family names.\n\tFamilyNameRegexFilter string `protobuf:\"bytes,5,opt,name=family_name_regex_filter\" json:\"family_name_regex_filter,omitempty\"`\n\t// Matches only cells from columns whose qualifiers satisfy the given RE2\n\t// regex.\n\t// Note that, since column qualifiers can contain arbitrary bytes, the '\\C'\n\t// escape sequence must be used if a true wildcard is desired. The '.'\n\t// character will not match the new line character '\\n', which may be\n\t// present in a binary qualifier.\n\tColumnQualifierRegexFilter []byte `protobuf:\"bytes,6,opt,name=column_qualifier_regex_filter,proto3\" json:\"column_qualifier_regex_filter,omitempty\"`\n\t// Matches only cells from columns within the given range.\n\tColumnRangeFilter *ColumnRange `protobuf:\"bytes,7,opt,name=column_range_filter\" json:\"column_range_filter,omitempty\"`\n\t// Matches only cells with timestamps within the given range.\n\tTimestampRangeFilter *TimestampRange `protobuf:\"bytes,8,opt,name=timestamp_range_filter\" json:\"timestamp_range_filter,omitempty\"`\n\t// Matches only cells with values that satisfy the given regular expression.\n\t// Note that, since cell values can contain arbitrary bytes, the '\\C' escape\n\t// sequence must be used if a true wildcard is desired. The '.' 
character\n\t// will not match the new line character '\\n', which may be present in a\n\t// binary value.\n\tValueRegexFilter []byte `protobuf:\"bytes,9,opt,name=value_regex_filter,proto3\" json:\"value_regex_filter,omitempty\"`\n\t// Matches only cells with values that fall within the given range.\n\tValueRangeFilter *ValueRange `protobuf:\"bytes,15,opt,name=value_range_filter\" json:\"value_range_filter,omitempty\"`\n\t// Skips the first N cells of each row, matching all subsequent cells.\n\tCellsPerRowOffsetFilter int32 `protobuf:\"varint,10,opt,name=cells_per_row_offset_filter\" json:\"cells_per_row_offset_filter,omitempty\"`\n\t// Matches only the first N cells of each row.\n\tCellsPerRowLimitFilter int32 `protobuf:\"varint,11,opt,name=cells_per_row_limit_filter\" json:\"cells_per_row_limit_filter,omitempty\"`\n\t// Matches only the most recent N cells within each column. For example,\n\t// if N=2, this filter would match column \"foo:bar\" at timestamps 10 and 9,\n\t// skip all earlier cells in \"foo:bar\", and then begin matching again in\n\t// column \"foo:bar2\".\n\tCellsPerColumnLimitFilter int32 `protobuf:\"varint,12,opt,name=cells_per_column_limit_filter\" json:\"cells_per_column_limit_filter,omitempty\"`\n\t// Replaces each cell's value with the empty string.\n\tStripValueTransformer bool `protobuf:\"varint,13,opt,name=strip_value_transformer\" json:\"strip_value_transformer,omitempty\"`\n}\n\nfunc (m *RowFilter) Reset()         { *m = RowFilter{} }\nfunc (m *RowFilter) String() string { return proto.CompactTextString(m) }\nfunc (*RowFilter) ProtoMessage()    {}\n\nfunc (m *RowFilter) GetChain() *RowFilter_Chain {\n\tif m != nil {\n\t\treturn m.Chain\n\t}\n\treturn nil\n}\n\nfunc (m *RowFilter) GetInterleave() *RowFilter_Interleave {\n\tif m != nil {\n\t\treturn m.Interleave\n\t}\n\treturn nil\n}\n\nfunc (m *RowFilter) GetCondition() *RowFilter_Condition {\n\tif m != nil {\n\t\treturn m.Condition\n\t}\n\treturn nil\n}\n\nfunc (m *RowFilter) 
GetColumnRangeFilter() *ColumnRange {\n\tif m != nil {\n\t\treturn m.ColumnRangeFilter\n\t}\n\treturn nil\n}\n\nfunc (m *RowFilter) GetTimestampRangeFilter() *TimestampRange {\n\tif m != nil {\n\t\treturn m.TimestampRangeFilter\n\t}\n\treturn nil\n}\n\nfunc (m *RowFilter) GetValueRangeFilter() *ValueRange {\n\tif m != nil {\n\t\treturn m.ValueRangeFilter\n\t}\n\treturn nil\n}\n\n// A RowFilter which sends rows through several RowFilters in sequence.\ntype RowFilter_Chain struct {\n\t// The elements of \"filters\" are chained together to process the input row:\n\t// in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row\n\t// The full chain is executed atomically.\n\tFilters []*RowFilter `protobuf:\"bytes,1,rep,name=filters\" json:\"filters,omitempty\"`\n}\n\nfunc (m *RowFilter_Chain) Reset()         { *m = RowFilter_Chain{} }\nfunc (m *RowFilter_Chain) String() string { return proto.CompactTextString(m) }\nfunc (*RowFilter_Chain) ProtoMessage()    {}\n\nfunc (m *RowFilter_Chain) GetFilters() []*RowFilter {\n\tif m != nil {\n\t\treturn m.Filters\n\t}\n\treturn nil\n}\n\n// A RowFilter which sends each row to each of several component\n// RowFilters and interleaves the results.\ntype RowFilter_Interleave struct {\n\t// The elements of \"filters\" all process a copy of the input row, and the\n\t// results are pooled, sorted, and combined into a single output row.\n\t// If multiple cells are produced with the same column and timestamp,\n\t// they will all appear in the output row in an unspecified mutual order.\n\t// Consider the following example, with three filters:\n\t//\n\t//                              input row\n\t//                                  |\n\t//        -----------------------------------------------------\n\t//        |                         |                         |\n\t//       f(0)                      f(1)                      f(2)\n\t//        |                         |                         |\n\t// 1: foo,bar,10,x            
 foo,bar,10,z              far,bar,7,a\n\t// 2: foo,blah,11,z            far,blah,5,x              far,blah,5,x\n\t//        |                         |                         |\n\t//        -----------------------------------------------------\n\t//                                  |\n\t// 1:                        foo,bar,10,z     // could have switched with #2\n\t// 2:                        foo,bar,10,x     // could have switched with #1\n\t// 3:                        foo,blah,11,z\n\t// 4:                        far,bar,7,a\n\t// 5:                        far,blah,5,x     // identical to #6\n\t// 6:                        far,blah,5,x     // identical to #5\n\t// All interleaved filters are executed atomically.\n\tFilters []*RowFilter `protobuf:\"bytes,1,rep,name=filters\" json:\"filters,omitempty\"`\n}\n\nfunc (m *RowFilter_Interleave) Reset()         { *m = RowFilter_Interleave{} }\nfunc (m *RowFilter_Interleave) String() string { return proto.CompactTextString(m) }\nfunc (*RowFilter_Interleave) ProtoMessage()    {}\n\nfunc (m *RowFilter_Interleave) GetFilters() []*RowFilter {\n\tif m != nil {\n\t\treturn m.Filters\n\t}\n\treturn nil\n}\n\n// A RowFilter which evaluates one of two possible RowFilters, depending on\n// whether or not a predicate RowFilter outputs any cells from the input row.\n//\n// IMPORTANT NOTE: The predicate filter does not execute atomically with the\n// true and false filters, which may lead to inconsistent or unexpected\n// results. Additionally, Condition filters have poor performance, especially\n// when filters are set for the false condition.\ntype RowFilter_Condition struct {\n\t// If \"predicate_filter\" outputs any cells, then \"true_filter\" will be\n\t// evaluated on the input row. 
Otherwise, \"false_filter\" will be evaluated.\n\tPredicateFilter *RowFilter `protobuf:\"bytes,1,opt,name=predicate_filter\" json:\"predicate_filter,omitempty\"`\n\t// The filter to apply to the input row if \"predicate_filter\" returns any\n\t// results. If not provided, no results will be returned in the true case.\n\tTrueFilter *RowFilter `protobuf:\"bytes,2,opt,name=true_filter\" json:\"true_filter,omitempty\"`\n\t// The filter to apply to the input row if \"predicate_filter\" does not\n\t// return any results. If not provided, no results will be returned in the\n\t// false case.\n\tFalseFilter *RowFilter `protobuf:\"bytes,3,opt,name=false_filter\" json:\"false_filter,omitempty\"`\n}\n\nfunc (m *RowFilter_Condition) Reset()         { *m = RowFilter_Condition{} }\nfunc (m *RowFilter_Condition) String() string { return proto.CompactTextString(m) }\nfunc (*RowFilter_Condition) ProtoMessage()    {}\n\nfunc (m *RowFilter_Condition) GetPredicateFilter() *RowFilter {\n\tif m != nil {\n\t\treturn m.PredicateFilter\n\t}\n\treturn nil\n}\n\nfunc (m *RowFilter_Condition) GetTrueFilter() *RowFilter {\n\tif m != nil {\n\t\treturn m.TrueFilter\n\t}\n\treturn nil\n}\n\nfunc (m *RowFilter_Condition) GetFalseFilter() *RowFilter {\n\tif m != nil {\n\t\treturn m.FalseFilter\n\t}\n\treturn nil\n}\n\n// Specifies a particular change to be made to the contents of a row.\ntype Mutation struct {\n\t// Set a cell's value.\n\tSetCell *Mutation_SetCell `protobuf:\"bytes,1,opt,name=set_cell\" json:\"set_cell,omitempty\"`\n\t// Deletes cells from a column.\n\tDeleteFromColumn *Mutation_DeleteFromColumn `protobuf:\"bytes,2,opt,name=delete_from_column\" json:\"delete_from_column,omitempty\"`\n\t// Deletes cells from a column family.\n\tDeleteFromFamily *Mutation_DeleteFromFamily `protobuf:\"bytes,3,opt,name=delete_from_family\" json:\"delete_from_family,omitempty\"`\n\t// Deletes cells from the entire row.\n\tDeleteFromRow *Mutation_DeleteFromRow 
`protobuf:\"bytes,4,opt,name=delete_from_row\" json:\"delete_from_row,omitempty\"`\n}\n\nfunc (m *Mutation) Reset()         { *m = Mutation{} }\nfunc (m *Mutation) String() string { return proto.CompactTextString(m) }\nfunc (*Mutation) ProtoMessage()    {}\n\nfunc (m *Mutation) GetSetCell() *Mutation_SetCell {\n\tif m != nil {\n\t\treturn m.SetCell\n\t}\n\treturn nil\n}\n\nfunc (m *Mutation) GetDeleteFromColumn() *Mutation_DeleteFromColumn {\n\tif m != nil {\n\t\treturn m.DeleteFromColumn\n\t}\n\treturn nil\n}\n\nfunc (m *Mutation) GetDeleteFromFamily() *Mutation_DeleteFromFamily {\n\tif m != nil {\n\t\treturn m.DeleteFromFamily\n\t}\n\treturn nil\n}\n\nfunc (m *Mutation) GetDeleteFromRow() *Mutation_DeleteFromRow {\n\tif m != nil {\n\t\treturn m.DeleteFromRow\n\t}\n\treturn nil\n}\n\n// A Mutation which sets the value of the specified cell.\ntype Mutation_SetCell struct {\n\t// The name of the family into which new data should be written.\n\t// Must match [-_.a-zA-Z0-9]+\n\tFamilyName string `protobuf:\"bytes,1,opt,name=family_name\" json:\"family_name,omitempty\"`\n\t// The qualifier of the column into which new data should be written.\n\t// Can be any byte string, including the empty string.\n\tColumnQualifier []byte `protobuf:\"bytes,2,opt,name=column_qualifier,proto3\" json:\"column_qualifier,omitempty\"`\n\t// The timestamp of the cell into which new data should be written.\n\t// Use -1 for current Bigtable server time.\n\t// Otherwise, the client should set this value itself, noting that the\n\t// default value is a timestamp of zero if the field is left unspecified.\n\t// Values must match the \"granularity\" of the table (e.g. 
micros, millis).\n\tTimestampMicros int64 `protobuf:\"varint,3,opt,name=timestamp_micros\" json:\"timestamp_micros,omitempty\"`\n\t// The value to be written into the specified cell.\n\tValue []byte `protobuf:\"bytes,4,opt,name=value,proto3\" json:\"value,omitempty\"`\n}\n\nfunc (m *Mutation_SetCell) Reset()         { *m = Mutation_SetCell{} }\nfunc (m *Mutation_SetCell) String() string { return proto.CompactTextString(m) }\nfunc (*Mutation_SetCell) ProtoMessage()    {}\n\n// A Mutation which deletes cells from the specified column, optionally\n// restricting the deletions to a given timestamp range.\ntype Mutation_DeleteFromColumn struct {\n\t// The name of the family from which cells should be deleted.\n\t// Must match [-_.a-zA-Z0-9]+\n\tFamilyName string `protobuf:\"bytes,1,opt,name=family_name\" json:\"family_name,omitempty\"`\n\t// The qualifier of the column from which cells should be deleted.\n\t// Can be any byte string, including the empty string.\n\tColumnQualifier []byte `protobuf:\"bytes,2,opt,name=column_qualifier,proto3\" json:\"column_qualifier,omitempty\"`\n\t// The range of timestamps within which cells should be deleted.\n\tTimeRange *TimestampRange `protobuf:\"bytes,3,opt,name=time_range\" json:\"time_range,omitempty\"`\n}\n\nfunc (m *Mutation_DeleteFromColumn) Reset()         { *m = Mutation_DeleteFromColumn{} }\nfunc (m *Mutation_DeleteFromColumn) String() string { return proto.CompactTextString(m) }\nfunc (*Mutation_DeleteFromColumn) ProtoMessage()    {}\n\nfunc (m *Mutation_DeleteFromColumn) GetTimeRange() *TimestampRange {\n\tif m != nil {\n\t\treturn m.TimeRange\n\t}\n\treturn nil\n}\n\n// A Mutation which deletes all cells from the specified column family.\ntype Mutation_DeleteFromFamily struct {\n\t// The name of the family from which cells should be deleted.\n\t// Must match [-_.a-zA-Z0-9]+\n\tFamilyName string `protobuf:\"bytes,1,opt,name=family_name\" json:\"family_name,omitempty\"`\n}\n\nfunc (m *Mutation_DeleteFromFamily) Reset()     
    { *m = Mutation_DeleteFromFamily{} }\nfunc (m *Mutation_DeleteFromFamily) String() string { return proto.CompactTextString(m) }\nfunc (*Mutation_DeleteFromFamily) ProtoMessage()    {}\n\n// A Mutation which deletes all cells from the containing row.\ntype Mutation_DeleteFromRow struct {\n}\n\nfunc (m *Mutation_DeleteFromRow) Reset()         { *m = Mutation_DeleteFromRow{} }\nfunc (m *Mutation_DeleteFromRow) String() string { return proto.CompactTextString(m) }\nfunc (*Mutation_DeleteFromRow) ProtoMessage()    {}\n\n// Specifies an atomic read/modify/write operation on the latest value of the\n// specified column.\ntype ReadModifyWriteRule struct {\n\t// The name of the family to which the read/modify/write should be applied.\n\t// Must match [-_.a-zA-Z0-9]+\n\tFamilyName string `protobuf:\"bytes,1,opt,name=family_name\" json:\"family_name,omitempty\"`\n\t// The qualifier of the column to which the read/modify/write should be\n\t// applied.\n\t// Can be any byte string, including the empty string.\n\tColumnQualifier []byte `protobuf:\"bytes,2,opt,name=column_qualifier,proto3\" json:\"column_qualifier,omitempty\"`\n\t// Rule specifying that \"append_value\" be appended to the existing value.\n\t// If the targeted cell is unset, it will be treated as containing the\n\t// empty string.\n\tAppendValue []byte `protobuf:\"bytes,3,opt,name=append_value,proto3\" json:\"append_value,omitempty\"`\n\t// Rule specifying that \"increment_amount\" be added to the existing value.\n\t// If the targeted cell is unset, it will be treated as containing a zero.\n\t// Otherwise, the targeted cell must contain an 8-byte value (interpreted\n\t// as a 64-bit big-endian signed integer), or the entire request will fail.\n\tIncrementAmount int64 `protobuf:\"varint,4,opt,name=increment_amount\" json:\"increment_amount,omitempty\"`\n}\n\nfunc (m *ReadModifyWriteRule) Reset()         { *m = ReadModifyWriteRule{} }\nfunc (m *ReadModifyWriteRule) String() string { return 
proto.CompactTextString(m) }\nfunc (*ReadModifyWriteRule) ProtoMessage()    {}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto",
    "content": "// Copyright (c) 2015, Google Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nsyntax = \"proto3\";\n\npackage google.bigtable.v1;\n\noption java_multiple_files = true;\noption java_outer_classname = \"BigtableDataProto\";\noption java_package = \"com.google.bigtable.v1\";\n\n\n// Specifies the complete (requested) contents of a single row of a table.\n// Rows which exceed 256MiB in size cannot be read in full.\nmessage Row {\n  // The unique key which identifies this row within its table. This is the same\n  // key that's used to identify the row in, for example, a MutateRowRequest.\n  // May contain any non-empty byte string up to 16KiB in length.\n  bytes key = 1;\n\n  // May be empty, but only if the entire row is empty.\n  // The mutual ordering of column families is not specified.\n  repeated Family families = 2;\n}\n\n// Specifies (some of) the contents of a single row/column family of a table.\nmessage Family {\n  // The unique key which identifies this family within its row. This is the\n  // same key that's used to identify the family in, for example, a RowFilter\n  // which sets its \"family_name_regex_filter\" field.\n  // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may\n  // produce cells in a sentinel family with an empty name.\n  // Must be no greater than 64 characters in length.\n  string name = 1;\n\n  // Must not be empty. 
Sorted in order of increasing \"qualifier\".\n  repeated Column columns = 2;\n}\n\n// Specifies (some of) the contents of a single row/column of a table.\nmessage Column {\n  // The unique key which identifies this column within its family. This is the\n  // same key that's used to identify the column in, for example, a RowFilter\n  // which sets its \"column_qualifier_regex_filter\" field.\n  // May contain any byte string, including the empty string, up to 16kiB in\n  // length.\n  bytes qualifier = 1;\n\n  // Must not be empty. Sorted in order of decreasing \"timestamp_micros\".\n  repeated Cell cells = 2;\n}\n\n// Specifies (some of) the contents of a single row/column/timestamp of a table.\nmessage Cell {\n  // The cell's stored timestamp, which also uniquely identifies it within\n  // its column.\n  // Values are always expressed in microseconds, but individual tables may set\n  // a coarser \"granularity\" to further restrict the allowed values. For\n  // example, a table which specifies millisecond granularity will only allow\n  // values of \"timestamp_micros\" which are multiples of 1000.\n  int64 timestamp_micros = 1;\n\n  // The value stored in the cell.\n  // May contain any byte string, including the empty string, up to 100MiB in\n  // length.\n  bytes value = 2;\n}\n\n// Specifies a contiguous range of rows.\nmessage RowRange {\n  // Inclusive lower bound. If left empty, interpreted as the empty string.\n  bytes start_key = 2;\n\n  // Exclusive upper bound. 
If left empty, interpreted as infinity.\n  bytes end_key = 3;\n}\n\n// Specifies a contiguous range of columns within a single column family.\n// The range spans from <column_family>:<start_qualifier> to\n// <column_family>:<end_qualifier>, where both bounds can be either inclusive or\n// exclusive.\nmessage ColumnRange {\n  // The name of the column family within which this range falls.\n  string family_name = 1;\n\n  oneof start_qualifier {\n    // Used when giving an inclusive lower bound for the range.\n    bytes start_qualifier_inclusive = 2;\n\n    // Used when giving an exclusive lower bound for the range.\n    bytes start_qualifier_exclusive = 3;\n  }\n\n  oneof end_qualifier {\n    // Used when giving an inclusive upper bound for the range.\n    bytes end_qualifier_inclusive = 4;\n\n    // Used when giving an exclusive upper bound for the range.\n    bytes end_qualifier_exclusive = 5;\n  }\n}\n\n// Specified a contiguous range of microsecond timestamps.\nmessage TimestampRange {\n  // Inclusive lower bound. If left empty, interpreted as 0.\n  int64 start_timestamp_micros = 1;\n\n  // Exclusive upper bound. If left empty, interpreted as infinity.\n  int64 end_timestamp_micros = 2;\n}\n\n// Specifies a contiguous range of raw byte values.\nmessage ValueRange {\n  oneof start_value {\n    // Used when giving an inclusive lower bound for the range.\n    bytes start_value_inclusive = 1;\n\n    // Used when giving an exclusive lower bound for the range.\n    bytes start_value_exclusive = 2;\n  }\n\n  oneof end_value {\n    // Used when giving an inclusive upper bound for the range.\n    bytes end_value_inclusive = 3;\n\n    // Used when giving an exclusive upper bound for the range.\n    bytes end_value_exclusive = 4;\n  }\n}\n\n// Takes a row as input and produces an alternate view of the row based on\n// specified rules. 
For example, a RowFilter might trim down a row to include\n// just the cells from columns matching a given regular expression, or might\n// return all the cells of a row but not their values. More complicated filters\n// can be composed out of these components to express requests such as, \"within\n// every column of a particular family, give just the two most recent cells\n// which are older than timestamp X.\"\n//\n// There are two broad categories of RowFilters (true filters and transformers),\n// as well as two ways to compose simple filters into more complex ones\n// (chains and interleaves). They work as follows:\n//\n// * True filters alter the input row by excluding some of its cells wholesale\n// from the output row. An example of a true filter is the \"value_regex_filter\",\n// which excludes cells whose values don't match the specified pattern. All\n// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax)\n// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An\n// important point to keep in mind is that RE2(.) is equivalent by default to\n// RE2([^\\n]), meaning that it does not match newlines. When attempting to match\n// an arbitrary byte, you should therefore use the escape sequence '\\C', which\n// may need to be further escaped as '\\\\C' in your client language.\n//\n// * Transformers alter the input row by changing the values of some of its\n// cells in the output, without excluding them completely. 
Currently, the only\n// supported transformer is the \"strip_value_transformer\", which replaces every\n// cell's value with the empty string.\n//\n// * Chains and interleaves are described in more detail in the\n// RowFilter.Chain and RowFilter.Interleave documentation.\n//\n// The total serialized size of a RowFilter message must not\n// exceed 4096 bytes, and RowFilters may not be nested within each other\n// (in Chains or Interleaves) to a depth of more than 20.\nmessage RowFilter {\n  // A RowFilter which sends rows through several RowFilters in sequence.\n  message Chain {\n    // The elements of \"filters\" are chained together to process the input row:\n    // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row\n    // The full chain is executed atomically.\n    repeated RowFilter filters = 1;\n  }\n\n  // A RowFilter which sends each row to each of several component\n  // RowFilters and interleaves the results.\n  message Interleave {\n    // The elements of \"filters\" all process a copy of the input row, and the\n    // results are pooled, sorted, and combined into a single output row.\n    // If multiple cells are produced with the same column and timestamp,\n    // they will all appear in the output row in an unspecified mutual order.\n    // Consider the following example, with three filters:\n    //\n    //                              input row\n    //                                  |\n    //        -----------------------------------------------------\n    //        |                         |                         |\n    //       f(0)                      f(1)                      f(2)\n    //        |                         |                         |\n    // 1: foo,bar,10,x             foo,bar,10,z              far,bar,7,a\n    // 2: foo,blah,11,z            far,blah,5,x              far,blah,5,x\n    //        |                         |                         |\n    //        
-----------------------------------------------------\n    //                                  |\n    // 1:                        foo,bar,10,z     // could have switched with #2\n    // 2:                        foo,bar,10,x     // could have switched with #1\n    // 3:                        foo,blah,11,z\n    // 4:                        far,bar,7,a\n    // 5:                        far,blah,5,x     // identical to #6\n    // 6:                        far,blah,5,x     // identical to #5\n    // All interleaved filters are executed atomically.\n    repeated RowFilter filters = 1;\n  }\n\n  // A RowFilter which evaluates one of two possible RowFilters, depending on\n  // whether or not a predicate RowFilter outputs any cells from the input row.\n  //\n  // IMPORTANT NOTE: The predicate filter does not execute atomically with the\n  // true and false filters, which may lead to inconsistent or unexpected\n  // results. Additionally, Condition filters have poor performance, especially\n  // when filters are set for the false condition.\n  message Condition {\n    // If \"predicate_filter\" outputs any cells, then \"true_filter\" will be\n    // evaluated on the input row. Otherwise, \"false_filter\" will be evaluated.\n    RowFilter predicate_filter = 1;\n\n    // The filter to apply to the input row if \"predicate_filter\" returns any\n    // results. If not provided, no results will be returned in the true case.\n    RowFilter true_filter = 2;\n\n    // The filter to apply to the input row if \"predicate_filter\" does not\n    // return any results. 
If not provided, no results will be returned in the\n    // false case.\n    RowFilter false_filter = 3;\n  }\n\n  oneof filter {\n    // Applies several RowFilters to the data in sequence, progressively\n    // narrowing the results.\n    Chain chain = 1;\n\n    // Applies several RowFilters to the data in parallel and combines the\n    // results.\n    Interleave interleave = 2;\n\n    // Applies one of two possible RowFilters to the data based on the output of\n    // a predicate RowFilter.\n    Condition condition = 3;\n\n    // Matches only cells from rows whose keys satisfy the given RE2 regex. In\n    // other words, passes through the entire row when the key matches, and\n    // otherwise produces an empty row.\n    // Note that, since row keys can contain arbitrary bytes, the '\\C' escape\n    // sequence must be used if a true wildcard is desired. The '.' character\n    // will not match the new line character '\\n', which may be present in a\n    // binary key.\n    bytes row_key_regex_filter = 4;\n\n    // Matches all cells from a row with probability p, and matches no cells\n    // from the row with probability 1-p.\n    double row_sample_filter = 14;\n\n    // Matches only cells from columns whose families satisfy the given RE2\n    // regex. For technical reasons, the regex must not contain the ':'\n    // character, even if it is not being used as a literal.\n    // Note that, since column families cannot contain the new line character\n    // '\\n', it is sufficient to use '.' as a full wildcard when matching\n    // column family names.\n    string family_name_regex_filter = 5;\n\n    // Matches only cells from columns whose qualifiers satisfy the given RE2\n    // regex.\n    // Note that, since column qualifiers can contain arbitrary bytes, the '\\C'\n    // escape sequence must be used if a true wildcard is desired. 
The '.'\n    // character will not match the new line character '\\n', which may be\n    // present in a binary qualifier.\n    bytes column_qualifier_regex_filter = 6;\n\n    // Matches only cells from columns within the given range.\n    ColumnRange column_range_filter = 7;\n\n    // Matches only cells with timestamps within the given range.\n    TimestampRange timestamp_range_filter = 8;\n\n    // Matches only cells with values that satisfy the given regular expression.\n    // Note that, since cell values can contain arbitrary bytes, the '\\C' escape\n    // sequence must be used if a true wildcard is desired. The '.' character\n    // will not match the new line character '\\n', which may be present in a\n    // binary value.\n    bytes value_regex_filter = 9;\n\n    // Matches only cells with values that fall within the given range.\n    ValueRange value_range_filter = 15;\n\n    // Skips the first N cells of each row, matching all subsequent cells.\n    int32 cells_per_row_offset_filter = 10;\n\n    // Matches only the first N cells of each row.\n    int32 cells_per_row_limit_filter = 11;\n\n    // Matches only the most recent N cells within each column. 
For example,\n    // if N=2, this filter would match column \"foo:bar\" at timestamps 10 and 9,\n    // skip all earlier cells in \"foo:bar\", and then begin matching again in\n    // column \"foo:bar2\".\n    int32 cells_per_column_limit_filter = 12;\n\n    // Replaces each cell's value with the empty string.\n    bool strip_value_transformer = 13;\n  }\n}\n\n// Specifies a particular change to be made to the contents of a row.\nmessage Mutation {\n  // A Mutation which sets the value of the specified cell.\n  message SetCell {\n    // The name of the family into which new data should be written.\n    // Must match [-_.a-zA-Z0-9]+\n    string family_name = 1;\n\n    // The qualifier of the column into which new data should be written.\n    // Can be any byte string, including the empty string.\n    bytes column_qualifier = 2;\n\n    // The timestamp of the cell into which new data should be written.\n    // Use -1 for current Bigtable server time.\n    // Otherwise, the client should set this value itself, noting that the\n    // default value is a timestamp of zero if the field is left unspecified.\n    // Values must match the \"granularity\" of the table (e.g. 
micros, millis).\n    int64 timestamp_micros = 3;\n\n    // The value to be written into the specified cell.\n    bytes value = 4;\n  }\n\n  // A Mutation which deletes cells from the specified column, optionally\n  // restricting the deletions to a given timestamp range.\n  message DeleteFromColumn {\n    // The name of the family from which cells should be deleted.\n    // Must match [-_.a-zA-Z0-9]+\n    string family_name = 1;\n\n    // The qualifier of the column from which cells should be deleted.\n    // Can be any byte string, including the empty string.\n    bytes column_qualifier = 2;\n\n    // The range of timestamps within which cells should be deleted.\n    TimestampRange time_range = 3;\n  }\n\n  // A Mutation which deletes all cells from the specified column family.\n  message DeleteFromFamily {\n    // The name of the family from which cells should be deleted.\n    // Must match [-_.a-zA-Z0-9]+\n    string family_name = 1;\n  }\n\n  // A Mutation which deletes all cells from the containing row.\n  message DeleteFromRow {\n\n  }\n\n  oneof mutation {\n    // Set a cell's value.\n    SetCell set_cell = 1;\n\n    // Deletes cells from a column.\n    DeleteFromColumn delete_from_column = 2;\n\n    // Deletes cells from a column family.\n    DeleteFromFamily delete_from_family = 3;\n\n    // Deletes cells from the entire row.\n    DeleteFromRow delete_from_row = 4;\n  }\n}\n\n// Specifies an atomic read/modify/write operation on the latest value of the\n// specified column.\nmessage ReadModifyWriteRule {\n  // The name of the family to which the read/modify/write should be applied.\n  // Must match [-_.a-zA-Z0-9]+\n  string family_name = 1;\n\n  // The qualifier of the column to which the read/modify/write should be\n  // applied.\n  // Can be any byte string, including the empty string.\n  bytes column_qualifier = 2;\n\n  oneof rule {\n    // Rule specifying that \"append_value\" be appended to the existing value.\n    // If the targeted cell is unset, 
it will be treated as containing the\n    // empty string.\n    bytes append_value = 3;\n\n    // Rule specifying that \"increment_amount\" be added to the existing value.\n    // If the targeted cell is unset, it will be treated as containing a zero.\n    // Otherwise, the targeted cell must contain an 8-byte value (interpreted\n    // as a 64-bit big-endian signed integer), or the entire request will fail.\n    int64 increment_amount = 4;\n  }\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/empty/empty.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/cloud/bigtable/internal/empty/empty.proto\n// DO NOT EDIT!\n\n/*\nPackage google_protobuf is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/cloud/bigtable/internal/empty/empty.proto\n\nIt has these top-level messages:\n\tEmpty\n*/\npackage google_protobuf\n\nimport proto \"github.com/golang/protobuf/proto\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\n\n// A generic empty message that you can re-use to avoid defining duplicated\n// empty messages in your APIs. A typical example is to use it as the request\n// or the response type of an API method. For instance:\n//\n//     service Foo {\n//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n//     }\n//\ntype Empty struct {\n}\n\nfunc (m *Empty) Reset()         { *m = Empty{} }\nfunc (m *Empty) String() string { return proto.CompactTextString(m) }\nfunc (*Empty) ProtoMessage()    {}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/empty/empty.proto",
    "content": "// Copyright (c) 2015, Google Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption java_multiple_files = true;\noption java_outer_classname = \"EmptyProto\";\noption java_package = \"com.google.protobuf\";\n\n\n// A generic empty message that you can re-use to avoid defining duplicated\n// empty messages in your APIs. A typical example is to use it as the request\n// or the response type of an API method. For instance:\n//\n//     service Foo {\n//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n//     }\n//\nmessage Empty {\n\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/regen.sh",
    "content": "#!/bin/bash -e\n#\n# This script rebuilds the generated code for the protocol buffers.\n# To run this you will need protoc and goprotobuf installed;\n# see https://github.com/golang/protobuf for instructions.\n# You also need Go and Git installed.\n\nPKG=google.golang.org/cloud/bigtable\nUPSTREAM=https://github.com/GoogleCloudPlatform/cloud-bigtable-client\nUPSTREAM_SUBDIR=bigtable-protos/src/main/proto\n\nfunction die() {\n  echo 1>&2 $*\n  exit 1\n}\n\n# Sanity check that the right tools are accessible.\nfor tool in go git protoc protoc-gen-go; do\n  q=$(which $tool) || die \"didn't find $tool\"\n  echo 1>&2 \"$tool: $q\"\ndone\n\ntmpdir=$(mktemp -d -t regen-cbt.XXXXXX)\ntrap 'rm -rf $tmpdir' EXIT\n\necho -n 1>&2 \"finding package dir... \"\npkgdir=$(go list -f '{{.Dir}}' $PKG)\necho 1>&2 $pkgdir\nbase=$(echo $pkgdir | sed \"s,/$PKG\\$,,\")\necho 1>&2 \"base: $base\"\ncd $base\n\necho 1>&2 \"fetching latest protos... \"\ngit clone -q $UPSTREAM $tmpdir\n# Pass 1: build mapping from upstream filename to our filename.\ndeclare -A filename_map\nfor f in $(cd $PKG && find internal -name '*.proto'); do\n  echo -n 1>&2 \"looking for latest version of $f... \"\n  up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f))\n  echo 1>&2 $up\n  if [ $(echo $up | wc -w) != \"1\" ]; then\n    die \"not exactly one match\"\n  fi\n  filename_map[$up]=$f\ndone\n# Pass 2: build sed script for fixing imports.\nimport_fixes=$tmpdir/fix_imports.sed\nfor up in \"${!filename_map[@]}\"; do\n  f=${filename_map[$up]}\n  echo >>$import_fixes \"s,\\\"$up\\\",\\\"$PKG/$f\\\",\"\ndone\ncat $import_fixes | sed 's,^,### ,' 1>&2\n# Pass 3: copy files, making necessary adjustments.\nfor up in \"${!filename_map[@]}\"; do\n  f=${filename_map[$up]}\n  cat $tmpdir/$UPSTREAM_SUBDIR/$up |\n    # Adjust proto imports.\n    sed -f $import_fixes |\n    # Drop the UndeleteCluster RPC method. 
It returns a google.longrunning.Operation.\n    sed '/^  rpc UndeleteCluster(/,/^  }$/d' |\n    # Drop annotations, long-running operations and timestamps. They aren't supported (yet).\n    sed '/\"google\\/longrunning\\/operations.proto\"/d' |\n    sed '/google.longrunning.Operation/d' |\n    sed '/\"google\\/protobuf\\/timestamp.proto\"/d' |\n    sed '/google\\.protobuf\\.Timestamp/d' |\n    sed '/\"google\\/api\\/annotations.proto\"/d' |\n    sed '/option.*google\\.api\\.http.*{.*};$/d' |\n    cat > $PKG/$f\ndone\n\n# Run protoc once per package.\nfor dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do\n  echo 1>&2 \"* $dir\"\n  protoc --go_out=plugins=grpc:. $dir/*.proto\ndone\necho 1>&2 \"All OK\"\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto\n// DO NOT EDIT!\n\npackage google_bigtable_v1\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport google_bigtable_v11 \"google.golang.org/cloud/bigtable/internal/data_proto\"\nimport google_protobuf \"google.golang.org/cloud/bigtable/internal/empty\"\n\nimport (\n\tcontext \"golang.org/x/net/context\"\n\tgrpc \"google.golang.org/grpc\"\n)\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ context.Context\nvar _ grpc.ClientConn\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\n\nfunc init() {\n}\n\n// Client API for BigtableService service\n\ntype BigtableServiceClient interface {\n\t// Streams back the contents of all requested rows, optionally applying\n\t// the same Reader filter to each. Depending on their size, rows may be\n\t// broken up across multiple responses, but atomicity of each row will still\n\t// be preserved.\n\tReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigtableService_ReadRowsClient, error)\n\t// Returns a sample of row keys in the table. The returned row keys will\n\t// delimit contiguous sections of the table of approximately equal size,\n\t// which can be used to break up the data for distributed tasks like\n\t// mapreduces.\n\tSampleRowKeys(ctx context.Context, in *SampleRowKeysRequest, opts ...grpc.CallOption) (BigtableService_SampleRowKeysClient, error)\n\t// Mutates a row atomically. 
Cells already present in the row are left\n\t// unchanged unless explicitly changed by 'mutation'.\n\tMutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)\n\t// Mutates a row atomically based on the output of a predicate Reader filter.\n\tCheckAndMutateRow(ctx context.Context, in *CheckAndMutateRowRequest, opts ...grpc.CallOption) (*CheckAndMutateRowResponse, error)\n\t// Modifies a row atomically, reading the latest existing timestamp/value from\n\t// the specified columns and writing a new value at\n\t// max(existing timestamp, current server time) based on pre-defined\n\t// read/modify/write rules. Returns the new contents of all modified cells.\n\tReadModifyWriteRow(ctx context.Context, in *ReadModifyWriteRowRequest, opts ...grpc.CallOption) (*google_bigtable_v11.Row, error)\n}\n\ntype bigtableServiceClient struct {\n\tcc *grpc.ClientConn\n}\n\nfunc NewBigtableServiceClient(cc *grpc.ClientConn) BigtableServiceClient {\n\treturn &bigtableServiceClient{cc}\n}\n\nfunc (c *bigtableServiceClient) ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigtableService_ReadRowsClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_BigtableService_serviceDesc.Streams[0], c.cc, \"/google.bigtable.v1.BigtableService/ReadRows\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &bigtableServiceReadRowsClient{stream}\n\tif err := x.ClientStream.SendMsg(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := x.ClientStream.CloseSend(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn x, nil\n}\n\ntype BigtableService_ReadRowsClient interface {\n\tRecv() (*ReadRowsResponse, error)\n\tgrpc.ClientStream\n}\n\ntype bigtableServiceReadRowsClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *bigtableServiceReadRowsClient) Recv() (*ReadRowsResponse, error) {\n\tm := new(ReadRowsResponse)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, 
nil\n}\n\nfunc (c *bigtableServiceClient) SampleRowKeys(ctx context.Context, in *SampleRowKeysRequest, opts ...grpc.CallOption) (BigtableService_SampleRowKeysClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_BigtableService_serviceDesc.Streams[1], c.cc, \"/google.bigtable.v1.BigtableService/SampleRowKeys\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &bigtableServiceSampleRowKeysClient{stream}\n\tif err := x.ClientStream.SendMsg(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := x.ClientStream.CloseSend(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn x, nil\n}\n\ntype BigtableService_SampleRowKeysClient interface {\n\tRecv() (*SampleRowKeysResponse, error)\n\tgrpc.ClientStream\n}\n\ntype bigtableServiceSampleRowKeysClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *bigtableServiceSampleRowKeysClient) Recv() (*SampleRowKeysResponse, error) {\n\tm := new(SampleRowKeysResponse)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc (c *bigtableServiceClient) MutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {\n\tout := new(google_protobuf.Empty)\n\terr := grpc.Invoke(ctx, \"/google.bigtable.v1.BigtableService/MutateRow\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *bigtableServiceClient) CheckAndMutateRow(ctx context.Context, in *CheckAndMutateRowRequest, opts ...grpc.CallOption) (*CheckAndMutateRowResponse, error) {\n\tout := new(CheckAndMutateRowResponse)\n\terr := grpc.Invoke(ctx, \"/google.bigtable.v1.BigtableService/CheckAndMutateRow\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *bigtableServiceClient) ReadModifyWriteRow(ctx context.Context, in *ReadModifyWriteRowRequest, opts ...grpc.CallOption) (*google_bigtable_v11.Row, error) {\n\tout := new(google_bigtable_v11.Row)\n\terr := grpc.Invoke(ctx, 
\"/google.bigtable.v1.BigtableService/ReadModifyWriteRow\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\n// Server API for BigtableService service\n\ntype BigtableServiceServer interface {\n\t// Streams back the contents of all requested rows, optionally applying\n\t// the same Reader filter to each. Depending on their size, rows may be\n\t// broken up across multiple responses, but atomicity of each row will still\n\t// be preserved.\n\tReadRows(*ReadRowsRequest, BigtableService_ReadRowsServer) error\n\t// Returns a sample of row keys in the table. The returned row keys will\n\t// delimit contiguous sections of the table of approximately equal size,\n\t// which can be used to break up the data for distributed tasks like\n\t// mapreduces.\n\tSampleRowKeys(*SampleRowKeysRequest, BigtableService_SampleRowKeysServer) error\n\t// Mutates a row atomically. Cells already present in the row are left\n\t// unchanged unless explicitly changed by 'mutation'.\n\tMutateRow(context.Context, *MutateRowRequest) (*google_protobuf.Empty, error)\n\t// Mutates a row atomically based on the output of a predicate Reader filter.\n\tCheckAndMutateRow(context.Context, *CheckAndMutateRowRequest) (*CheckAndMutateRowResponse, error)\n\t// Modifies a row atomically, reading the latest existing timestamp/value from\n\t// the specified columns and writing a new value at\n\t// max(existing timestamp, current server time) based on pre-defined\n\t// read/modify/write rules. 
Returns the new contents of all modified cells.\n\tReadModifyWriteRow(context.Context, *ReadModifyWriteRowRequest) (*google_bigtable_v11.Row, error)\n}\n\nfunc RegisterBigtableServiceServer(s *grpc.Server, srv BigtableServiceServer) {\n\ts.RegisterService(&_BigtableService_serviceDesc, srv)\n}\n\nfunc _BigtableService_ReadRows_Handler(srv interface{}, stream grpc.ServerStream) error {\n\tm := new(ReadRowsRequest)\n\tif err := stream.RecvMsg(m); err != nil {\n\t\treturn err\n\t}\n\treturn srv.(BigtableServiceServer).ReadRows(m, &bigtableServiceReadRowsServer{stream})\n}\n\ntype BigtableService_ReadRowsServer interface {\n\tSend(*ReadRowsResponse) error\n\tgrpc.ServerStream\n}\n\ntype bigtableServiceReadRowsServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *bigtableServiceReadRowsServer) Send(m *ReadRowsResponse) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc _BigtableService_SampleRowKeys_Handler(srv interface{}, stream grpc.ServerStream) error {\n\tm := new(SampleRowKeysRequest)\n\tif err := stream.RecvMsg(m); err != nil {\n\t\treturn err\n\t}\n\treturn srv.(BigtableServiceServer).SampleRowKeys(m, &bigtableServiceSampleRowKeysServer{stream})\n}\n\ntype BigtableService_SampleRowKeysServer interface {\n\tSend(*SampleRowKeysResponse) error\n\tgrpc.ServerStream\n}\n\ntype bigtableServiceSampleRowKeysServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *bigtableServiceSampleRowKeysServer) Send(m *SampleRowKeysResponse) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc _BigtableService_MutateRow_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(MutateRowRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableServiceServer).MutateRow(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _BigtableService_CheckAndMutateRow_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, 
error) {\n\tin := new(CheckAndMutateRowRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableServiceServer).CheckAndMutateRow(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _BigtableService_ReadModifyWriteRow_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(ReadModifyWriteRowRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableServiceServer).ReadModifyWriteRow(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nvar _BigtableService_serviceDesc = grpc.ServiceDesc{\n\tServiceName: \"google.bigtable.v1.BigtableService\",\n\tHandlerType: (*BigtableServiceServer)(nil),\n\tMethods: []grpc.MethodDesc{\n\t\t{\n\t\t\tMethodName: \"MutateRow\",\n\t\t\tHandler:    _BigtableService_MutateRow_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"CheckAndMutateRow\",\n\t\t\tHandler:    _BigtableService_CheckAndMutateRow_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"ReadModifyWriteRow\",\n\t\t\tHandler:    _BigtableService_ReadModifyWriteRow_Handler,\n\t\t},\n\t},\n\tStreams: []grpc.StreamDesc{\n\t\t{\n\t\t\tStreamName:    \"ReadRows\",\n\t\t\tHandler:       _BigtableService_ReadRows_Handler,\n\t\t\tServerStreams: true,\n\t\t},\n\t\t{\n\t\t\tStreamName:    \"SampleRowKeys\",\n\t\t\tHandler:       _BigtableService_SampleRowKeys_Handler,\n\t\t\tServerStreams: true,\n\t\t},\n\t},\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto",
    "content": "// Copyright (c) 2015, Google Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nsyntax = \"proto3\";\n\npackage google.bigtable.v1;\n\nimport \"google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto\";\nimport \"google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto\";\nimport \"google.golang.org/cloud/bigtable/internal/empty/empty.proto\";\n\noption java_generic_services = true;\noption java_multiple_files = true;\noption java_outer_classname = \"BigtableServicesProto\";\noption java_package = \"com.google.bigtable.v1\";\n\n\n// Service for reading from and writing to existing Bigtables.\nservice BigtableService {\n  // Streams back the contents of all requested rows, optionally applying\n  // the same Reader filter to each. Depending on their size, rows may be\n  // broken up across multiple responses, but atomicity of each row will still\n  // be preserved.\n  rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {\n  }\n\n  // Returns a sample of row keys in the table. The returned row keys will\n  // delimit contiguous sections of the table of approximately equal size,\n  // which can be used to break up the data for distributed tasks like\n  // mapreduces.\n  rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) {\n  }\n\n  // Mutates a row atomically. 
Cells already present in the row are left\n  // unchanged unless explicitly changed by 'mutation'.\n  rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) {\n  }\n\n  // Mutates a row atomically based on the output of a predicate Reader filter.\n  rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) {\n  }\n\n  // Modifies a row atomically, reading the latest existing timestamp/value from\n  // the specified columns and writing a new value at\n  // max(existing timestamp, current server time) based on pre-defined\n  // read/modify/write rules. Returns the new contents of all modified cells.\n  rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) {\n  }\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto\n// DO NOT EDIT!\n\n/*\nPackage google_bigtable_v1 is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto\n\tgoogle.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto\n\nIt has these top-level messages:\n\tReadRowsRequest\n\tReadRowsResponse\n\tSampleRowKeysRequest\n\tSampleRowKeysResponse\n\tMutateRowRequest\n\tCheckAndMutateRowRequest\n\tCheckAndMutateRowResponse\n\tReadModifyWriteRowRequest\n*/\npackage google_bigtable_v1\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport google_bigtable_v11 \"google.golang.org/cloud/bigtable/internal/data_proto\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\n\n// Request message for BigtableServer.ReadRows.\ntype ReadRowsRequest struct {\n\t// The unique name of the table from which to read.\n\tTableName string `protobuf:\"bytes,1,opt,name=table_name\" json:\"table_name,omitempty\"`\n\t// The key of a single row from which to read.\n\tRowKey []byte `protobuf:\"bytes,2,opt,name=row_key,proto3\" json:\"row_key,omitempty\"`\n\t// A range of rows from which to read.\n\tRowRange *google_bigtable_v11.RowRange `protobuf:\"bytes,3,opt,name=row_range\" json:\"row_range,omitempty\"`\n\t// The filter to apply to the contents of the specified row(s), in the\n\t// deprecated string format. If unset, reads the most recent value from all\n\t// readable columns.\n\tDEPRECATEDStringFilter string `protobuf:\"bytes,4,opt,name=DEPRECATED_string_filter\" json:\"DEPRECATED_string_filter,omitempty\"`\n\t// The filter to apply to the contents of the specified row(s). 
If unset,\n\t// reads the entire table.\n\tFilter *google_bigtable_v11.RowFilter `protobuf:\"bytes,5,opt,name=filter\" json:\"filter,omitempty\"`\n\t// By default, rows are read sequentially, producing results which are\n\t// guaranteed to arrive in increasing row order. Setting\n\t// \"allow_row_interleaving\" to true allows multiple rows to be interleaved in\n\t// the response stream, which increases throughput but breaks this guarantee,\n\t// and may force the client to use more memory to buffer partially-received\n\t// rows.\n\tAllowRowInterleaving bool `protobuf:\"varint,6,opt,name=allow_row_interleaving\" json:\"allow_row_interleaving,omitempty\"`\n\t// The read will terminate after committing to N rows' worth of results. The\n\t// default (zero) is to return all results.\n\t// Note that if \"allow_row_interleaving\" is set to true, partial results may\n\t// be returned for more than N rows. However, only N \"commit_row\" chunks will\n\t// be sent.\n\tNumRowsLimit int64 `protobuf:\"varint,7,opt,name=num_rows_limit\" json:\"num_rows_limit,omitempty\"`\n}\n\nfunc (m *ReadRowsRequest) Reset()         { *m = ReadRowsRequest{} }\nfunc (m *ReadRowsRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ReadRowsRequest) ProtoMessage()    {}\n\nfunc (m *ReadRowsRequest) GetRowRange() *google_bigtable_v11.RowRange {\n\tif m != nil {\n\t\treturn m.RowRange\n\t}\n\treturn nil\n}\n\nfunc (m *ReadRowsRequest) GetFilter() *google_bigtable_v11.RowFilter {\n\tif m != nil {\n\t\treturn m.Filter\n\t}\n\treturn nil\n}\n\n// Response message for BigtableService.ReadRows.\ntype ReadRowsResponse struct {\n\t// The key of the row for which we're receiving data.\n\t// Results will be received in increasing row key order, unless\n\t// \"allow_row_interleaving\" was specified in the request.\n\tRowKey []byte `protobuf:\"bytes,1,opt,name=row_key,proto3\" json:\"row_key,omitempty\"`\n\t// One or more chunks of the row specified by \"row_key\".\n\tChunks 
[]*ReadRowsResponse_Chunk `protobuf:\"bytes,2,rep,name=chunks\" json:\"chunks,omitempty\"`\n}\n\nfunc (m *ReadRowsResponse) Reset()         { *m = ReadRowsResponse{} }\nfunc (m *ReadRowsResponse) String() string { return proto.CompactTextString(m) }\nfunc (*ReadRowsResponse) ProtoMessage()    {}\n\nfunc (m *ReadRowsResponse) GetChunks() []*ReadRowsResponse_Chunk {\n\tif m != nil {\n\t\treturn m.Chunks\n\t}\n\treturn nil\n}\n\n// Specifies a piece of a row's contents returned as part of the read\n// response stream.\ntype ReadRowsResponse_Chunk struct {\n\t// A subset of the data from a particular row. As long as no \"reset_row\"\n\t// is received in between, multiple \"row_contents\" from the same row are\n\t// from the same atomic view of that row, and will be received in the\n\t// expected family/column/timestamp order.\n\tRowContents *google_bigtable_v11.Family `protobuf:\"bytes,1,opt,name=row_contents\" json:\"row_contents,omitempty\"`\n\t// Indicates that the client should drop all previous chunks for\n\t// \"row_key\", as it will be re-read from the beginning.\n\tResetRow bool `protobuf:\"varint,2,opt,name=reset_row\" json:\"reset_row,omitempty\"`\n\t// Indicates that the client can safely process all previous chunks for\n\t// \"row_key\", as its data has been fully read.\n\tCommitRow bool `protobuf:\"varint,3,opt,name=commit_row\" json:\"commit_row,omitempty\"`\n}\n\nfunc (m *ReadRowsResponse_Chunk) Reset()         { *m = ReadRowsResponse_Chunk{} }\nfunc (m *ReadRowsResponse_Chunk) String() string { return proto.CompactTextString(m) }\nfunc (*ReadRowsResponse_Chunk) ProtoMessage()    {}\n\nfunc (m *ReadRowsResponse_Chunk) GetRowContents() *google_bigtable_v11.Family {\n\tif m != nil {\n\t\treturn m.RowContents\n\t}\n\treturn nil\n}\n\n// Request message for BigtableService.SampleRowKeys.\ntype SampleRowKeysRequest struct {\n\t// The unique name of the table from which to sample row keys.\n\tTableName string `protobuf:\"bytes,1,opt,name=table_name\" 
json:\"table_name,omitempty\"`\n}\n\nfunc (m *SampleRowKeysRequest) Reset()         { *m = SampleRowKeysRequest{} }\nfunc (m *SampleRowKeysRequest) String() string { return proto.CompactTextString(m) }\nfunc (*SampleRowKeysRequest) ProtoMessage()    {}\n\n// Response message for BigtableService.SampleRowKeys.\ntype SampleRowKeysResponse struct {\n\t// Sorted streamed sequence of sample row keys in the table. The table might\n\t// have contents before the first row key in the list and after the last one,\n\t// but a key containing the empty string indicates \"end of table\" and will be\n\t// the last response given, if present.\n\t// Note that row keys in this list may not have ever been written to or read\n\t// from, and users should therefore not make any assumptions about the row key\n\t// structure that are specific to their use case.\n\tRowKey []byte `protobuf:\"bytes,1,opt,name=row_key,proto3\" json:\"row_key,omitempty\"`\n\t// Approximate total storage space used by all rows in the table which precede\n\t// \"row_key\". 
Buffering the contents of all rows between two subsequent\n\t// samples would require space roughly equal to the difference in their\n\t// \"offset_bytes\" fields.\n\tOffsetBytes int64 `protobuf:\"varint,2,opt,name=offset_bytes\" json:\"offset_bytes,omitempty\"`\n}\n\nfunc (m *SampleRowKeysResponse) Reset()         { *m = SampleRowKeysResponse{} }\nfunc (m *SampleRowKeysResponse) String() string { return proto.CompactTextString(m) }\nfunc (*SampleRowKeysResponse) ProtoMessage()    {}\n\n// Request message for BigtableService.MutateRow.\ntype MutateRowRequest struct {\n\t// The unique name of the table to which the mutation should be applied.\n\tTableName string `protobuf:\"bytes,1,opt,name=table_name\" json:\"table_name,omitempty\"`\n\t// The key of the row to which the mutation should be applied.\n\tRowKey []byte `protobuf:\"bytes,2,opt,name=row_key,proto3\" json:\"row_key,omitempty\"`\n\t// Changes to be atomically applied to the specified row. Entries are applied\n\t// in order, meaning that earlier mutations can be masked by later ones.\n\tMutations []*google_bigtable_v11.Mutation `protobuf:\"bytes,3,rep,name=mutations\" json:\"mutations,omitempty\"`\n}\n\nfunc (m *MutateRowRequest) Reset()         { *m = MutateRowRequest{} }\nfunc (m *MutateRowRequest) String() string { return proto.CompactTextString(m) }\nfunc (*MutateRowRequest) ProtoMessage()    {}\n\nfunc (m *MutateRowRequest) GetMutations() []*google_bigtable_v11.Mutation {\n\tif m != nil {\n\t\treturn m.Mutations\n\t}\n\treturn nil\n}\n\n// Request message for BigtableService.CheckAndMutateRowRequest\ntype CheckAndMutateRowRequest struct {\n\t// The unique name of the table to which the conditional mutation should be\n\t// applied.\n\tTableName string `protobuf:\"bytes,1,opt,name=table_name\" json:\"table_name,omitempty\"`\n\t// The key of the row to which the conditional mutation should be applied.\n\tRowKey []byte `protobuf:\"bytes,2,opt,name=row_key,proto3\" json:\"row_key,omitempty\"`\n\t// Changes 
to be atomically applied to the specified row if \"predicate_filter\"\n\t// yields at least one cell when applied to \"row_key\". Entries are applied in\n\t// order, meaning that earlier mutations can be masked by later ones.\n\t// Must contain at least one entry if \"false_mutations\" is empty.\n\tTrueMutations []*google_bigtable_v11.Mutation `protobuf:\"bytes,4,rep,name=true_mutations\" json:\"true_mutations,omitempty\"`\n\t// Changes to be atomically applied to the specified row if \"predicate_filter\"\n\t// does not yield any cells when applied to \"row_key\". Entries are applied in\n\t// order, meaning that earlier mutations can be masked by later ones.\n\t// Must contain at least one entry if \"true_mutations\" is empty.\n\tFalseMutations []*google_bigtable_v11.Mutation `protobuf:\"bytes,5,rep,name=false_mutations\" json:\"false_mutations,omitempty\"`\n\t// The filter to be applied to the contents of the specified row, in the\n\t// deprecated string format. Depending on whether or not any results are\n\t// yielded, either \"true_mutations\" or \"false_mutations\" will be executed. If\n\t// unset, checks that the row contains any values at all.\n\tDEPRECATEDField_3 string `protobuf:\"bytes,3,opt,name=DEPRECATED_field_3\" json:\"DEPRECATED_field_3,omitempty\"`\n\t// The filter to be applied to the contents of the specified row. Depending\n\t// on whether or not any results are yielded, either \"true_mutations\" or\n\t// \"false_mutations\" will be executed. 
If unset, checks that the row contains\n\t// any values at all.\n\tPredicateFilter *google_bigtable_v11.RowFilter `protobuf:\"bytes,6,opt,name=predicate_filter\" json:\"predicate_filter,omitempty\"`\n}\n\nfunc (m *CheckAndMutateRowRequest) Reset()         { *m = CheckAndMutateRowRequest{} }\nfunc (m *CheckAndMutateRowRequest) String() string { return proto.CompactTextString(m) }\nfunc (*CheckAndMutateRowRequest) ProtoMessage()    {}\n\nfunc (m *CheckAndMutateRowRequest) GetTrueMutations() []*google_bigtable_v11.Mutation {\n\tif m != nil {\n\t\treturn m.TrueMutations\n\t}\n\treturn nil\n}\n\nfunc (m *CheckAndMutateRowRequest) GetFalseMutations() []*google_bigtable_v11.Mutation {\n\tif m != nil {\n\t\treturn m.FalseMutations\n\t}\n\treturn nil\n}\n\nfunc (m *CheckAndMutateRowRequest) GetPredicateFilter() *google_bigtable_v11.RowFilter {\n\tif m != nil {\n\t\treturn m.PredicateFilter\n\t}\n\treturn nil\n}\n\n// Response message for BigtableService.CheckAndMutateRowRequest.\ntype CheckAndMutateRowResponse struct {\n\t// Whether or not the request's \"predicate_filter\" yielded any results for\n\t// the specified row.\n\tPredicateMatched bool `protobuf:\"varint,1,opt,name=predicate_matched\" json:\"predicate_matched,omitempty\"`\n}\n\nfunc (m *CheckAndMutateRowResponse) Reset()         { *m = CheckAndMutateRowResponse{} }\nfunc (m *CheckAndMutateRowResponse) String() string { return proto.CompactTextString(m) }\nfunc (*CheckAndMutateRowResponse) ProtoMessage()    {}\n\n// Request message for BigtableService.ReadModifyWriteRowRequest.\ntype ReadModifyWriteRowRequest struct {\n\t// The unique name of the table to which the read/modify/write rules should be\n\t// applied.\n\tTableName string `protobuf:\"bytes,1,opt,name=table_name\" json:\"table_name,omitempty\"`\n\t// The key of the row to which the read/modify/write rules should be applied.\n\tRowKey []byte `protobuf:\"bytes,2,opt,name=row_key,proto3\" json:\"row_key,omitempty\"`\n\t// Rules specifying how the specified 
row's contents are to be transformed\n\t// into writes. Entries are applied in order, meaning that earlier rules will\n\t// affect the results of later ones.\n\tRules []*google_bigtable_v11.ReadModifyWriteRule `protobuf:\"bytes,3,rep,name=rules\" json:\"rules,omitempty\"`\n}\n\nfunc (m *ReadModifyWriteRowRequest) Reset()         { *m = ReadModifyWriteRowRequest{} }\nfunc (m *ReadModifyWriteRowRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ReadModifyWriteRowRequest) ProtoMessage()    {}\n\nfunc (m *ReadModifyWriteRowRequest) GetRules() []*google_bigtable_v11.ReadModifyWriteRule {\n\tif m != nil {\n\t\treturn m.Rules\n\t}\n\treturn nil\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto",
    "content": "// Copyright (c) 2015, Google Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nsyntax = \"proto3\";\n\npackage google.bigtable.v1;\n\nimport \"google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto\";\n\noption java_multiple_files = true;\noption java_outer_classname = \"BigtableServiceMessagesProto\";\noption java_package = \"com.google.bigtable.v1\";\n\n\n// Request message for BigtableServer.ReadRows.\nmessage ReadRowsRequest {\n  // The unique name of the table from which to read.\n  string table_name = 1;\n\n  oneof target {\n    // The key of a single row from which to read.\n    bytes row_key = 2;\n\n    // A range of rows from which to read.\n    RowRange row_range = 3;\n  }\n\n  // The filter to apply to the contents of the specified row(s), in the\n  // deprecated string format. If unset, reads the most recent value from all\n  // readable columns.\n  string DEPRECATED_string_filter = 4;\n\n  // The filter to apply to the contents of the specified row(s). If unset,\n  // reads the entire table.\n  RowFilter filter = 5;\n\n  // By default, rows are read sequentially, producing results which are\n  // guaranteed to arrive in increasing row order. 
Setting\n  // \"allow_row_interleaving\" to true allows multiple rows to be interleaved in\n  // the response stream, which increases throughput but breaks this guarantee,\n  // and may force the client to use more memory to buffer partially-received\n  // rows.\n  bool allow_row_interleaving = 6;\n\n  // The read will terminate after committing to N rows' worth of results. The\n  // default (zero) is to return all results.\n  // Note that if \"allow_row_interleaving\" is set to true, partial results may\n  // be returned for more than N rows. However, only N \"commit_row\" chunks will\n  // be sent.\n  int64 num_rows_limit = 7;\n}\n\n// Response message for BigtableService.ReadRows.\nmessage ReadRowsResponse {\n  // Specifies a piece of a row's contents returned as part of the read\n  // response stream.\n  message Chunk {\n    oneof chunk {\n      // A subset of the data from a particular row. As long as no \"reset_row\"\n      // is received in between, multiple \"row_contents\" from the same row are\n      // from the same atomic view of that row, and will be received in the\n      // expected family/column/timestamp order.\n      Family row_contents = 1;\n\n      // Indicates that the client should drop all previous chunks for\n      // \"row_key\", as it will be re-read from the beginning.\n      bool reset_row = 2;\n\n      // Indicates that the client can safely process all previous chunks for\n      // \"row_key\", as its data has been fully read.\n      bool commit_row = 3;\n    }\n  }\n\n  // The key of the row for which we're receiving data.\n  // Results will be received in increasing row key order, unless\n  // \"allow_row_interleaving\" was specified in the request.\n  bytes row_key = 1;\n\n  // One or more chunks of the row specified by \"row_key\".\n  repeated Chunk chunks = 2;\n}\n\n// Request message for BigtableService.SampleRowKeys.\nmessage SampleRowKeysRequest {\n  // The unique name of the table from which to sample row keys.\n  string 
table_name = 1;\n}\n\n// Response message for BigtableService.SampleRowKeys.\nmessage SampleRowKeysResponse {\n  // Sorted streamed sequence of sample row keys in the table. The table might\n  // have contents before the first row key in the list and after the last one,\n  // but a key containing the empty string indicates \"end of table\" and will be\n  // the last response given, if present.\n  // Note that row keys in this list may not have ever been written to or read\n  // from, and users should therefore not make any assumptions about the row key\n  // structure that are specific to their use case.\n  bytes row_key = 1;\n\n  // Approximate total storage space used by all rows in the table which precede\n  // \"row_key\". Buffering the contents of all rows between two subsequent\n  // samples would require space roughly equal to the difference in their\n  // \"offset_bytes\" fields.\n  int64 offset_bytes = 2;\n}\n\n// Request message for BigtableService.MutateRow.\nmessage MutateRowRequest {\n  // The unique name of the table to which the mutation should be applied.\n  string table_name = 1;\n\n  // The key of the row to which the mutation should be applied.\n  bytes row_key = 2;\n\n  // Changes to be atomically applied to the specified row. Entries are applied\n  // in order, meaning that earlier mutations can be masked by later ones.\n  repeated Mutation mutations = 3;\n}\n\n// Request message for BigtableService.CheckAndMutateRowRequest\nmessage CheckAndMutateRowRequest {\n  // The unique name of the table to which the conditional mutation should be\n  // applied.\n  string table_name = 1;\n\n  // The key of the row to which the conditional mutation should be applied.\n  bytes row_key = 2;\n\n  // Changes to be atomically applied to the specified row if \"predicate_filter\"\n  // yields at least one cell when applied to \"row_key\". 
Entries are applied in\n  // order, meaning that earlier mutations can be masked by later ones.\n  // Must contain at least one entry if \"false_mutations\" is empty.\n  repeated Mutation true_mutations = 4;\n\n  // Changes to be atomically applied to the specified row if \"predicate_filter\"\n  // does not yield any cells when applied to \"row_key\". Entries are applied in\n  // order, meaning that earlier mutations can be masked by later ones.\n  // Must contain at least one entry if \"true_mutations\" is empty.\n  repeated Mutation false_mutations = 5;\n\n  // The filter to be applied to the contents of the specified row, in the\n  // deprecated string format. Depending on whether or not any results are\n  // yielded, either \"true_mutations\" or \"false_mutations\" will be executed. If\n  // unset, checks that the row contains any values at all.\n  string DEPRECATED_field_3 = 3;\n\n  // The filter to be applied to the contents of the specified row. Depending\n  // on whether or not any results are yielded, either \"true_mutations\" or\n  // \"false_mutations\" will be executed. If unset, checks that the row contains\n  // any values at all.\n  RowFilter predicate_filter = 6;\n}\n\n// Response message for BigtableService.CheckAndMutateRowRequest.\nmessage CheckAndMutateRowResponse {\n  // Whether or not the request's \"predicate_filter\" yielded any results for\n  // the specified row.\n  bool predicate_matched = 1;\n}\n\n// Request message for BigtableService.ReadModifyWriteRowRequest.\nmessage ReadModifyWriteRowRequest {\n  // The unique name of the table to which the read/modify/write rules should be\n  // applied.\n  string table_name = 1;\n\n  // The key of the row to which the read/modify/write rules should be applied.\n  bytes row_key = 2;\n\n  // Rules specifying how the specified row's contents are to be transformed\n  // into writes. 
Entries are applied in order, meaning that earlier rules will\n  // affect the results of later ones.\n  repeated ReadModifyWriteRule rules = 3;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto\n// DO NOT EDIT!\n\n/*\nPackage google_bigtable_admin_table_v1 is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto\n\nIt has these top-level messages:\n\tTable\n\tColumnFamily\n*/\npackage google_bigtable_admin_table_v1\n\nimport proto \"github.com/golang/protobuf/proto\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\n\ntype Table_TimestampGranularity int32\n\nconst (\n\tTable_MILLIS Table_TimestampGranularity = 0\n)\n\nvar Table_TimestampGranularity_name = map[int32]string{\n\t0: \"MILLIS\",\n}\nvar Table_TimestampGranularity_value = map[string]int32{\n\t\"MILLIS\": 0,\n}\n\nfunc (x Table_TimestampGranularity) String() string {\n\treturn proto.EnumName(Table_TimestampGranularity_name, int32(x))\n}\n\n// A collection of user data indexed by row, column, and timestamp.\n// Each table is served using the resources of its parent cluster.\ntype Table struct {\n\t// A unique identifier of the form\n\t// <cluster_name>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\t// The column families configured for this table, mapped by column family id.\n\tColumnFamilies map[string]*ColumnFamily `protobuf:\"bytes,3,rep,name=column_families\" json:\"column_families,omitempty\" protobuf_key:\"bytes,1,opt,name=key\" protobuf_val:\"bytes,2,opt,name=value\"`\n\t// The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in\n\t// this table. 
Timestamps not matching the granularity will be rejected.\n\t// Cannot be changed once the table is created.\n\tGranularity Table_TimestampGranularity `protobuf:\"varint,4,opt,name=granularity,enum=google.bigtable.admin.table.v1.Table_TimestampGranularity\" json:\"granularity,omitempty\"`\n}\n\nfunc (m *Table) Reset()         { *m = Table{} }\nfunc (m *Table) String() string { return proto.CompactTextString(m) }\nfunc (*Table) ProtoMessage()    {}\n\nfunc (m *Table) GetColumnFamilies() map[string]*ColumnFamily {\n\tif m != nil {\n\t\treturn m.ColumnFamilies\n\t}\n\treturn nil\n}\n\n// A set of columns within a table which share a common configuration.\ntype ColumnFamily struct {\n\t// A unique identifier of the form <table_name>/families/[-_.a-zA-Z0-9]+\n\t// The last segment is the same as the \"name\" field in\n\t// google.bigtable.v1.Family.\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\t// Garbage collection expression specified by the following grammar:\n\t//   GC = EXPR\n\t//      | \"\" ;\n\t//   EXPR = EXPR, \"||\", EXPR              (* lowest precedence *)\n\t//        | EXPR, \"&&\", EXPR\n\t//        | \"(\", EXPR, \")\"                (* highest precedence *)\n\t//        | PROP ;\n\t//   PROP = \"version() >\", NUM32\n\t//        | \"age() >\", NUM64, [ UNIT ] ;\n\t//   NUM32 = non-zero-digit { digit } ;    (* # NUM32 <= 2^32 - 1 *)\n\t//   NUM64 = non-zero-digit { digit } ;    (* # NUM64 <= 2^63 - 1 *)\n\t//   UNIT =  \"d\" | \"h\" | \"m\"  (* d=days, h=hours, m=minutes, else micros *)\n\t// GC expressions can be up to 500 characters in length\n\t//\n\t// The different types of PROP are defined as follows:\n\t//   version() - cell index, counting from most recent and starting at 1\n\t//   age() - age of the cell (current time minus cell timestamp)\n\t//\n\t// Example: \"version() > 3 || (age() > 3d && version() > 1)\"\n\t//   drop cells beyond the most recent three, and drop cells older than three\n\t//   days unless 
they're the most recent cell in the row/column\n\t//\n\t// Garbage collection executes opportunistically in the background, and so\n\t// it's possible for reads to return a cell even if it matches the active GC\n\t// expression for its family.\n\tGcExpression string `protobuf:\"bytes,2,opt,name=gc_expression\" json:\"gc_expression,omitempty\"`\n}\n\nfunc (m *ColumnFamily) Reset()         { *m = ColumnFamily{} }\nfunc (m *ColumnFamily) String() string { return proto.CompactTextString(m) }\nfunc (*ColumnFamily) ProtoMessage()    {}\n\nfunc init() {\n\tproto.RegisterEnum(\"google.bigtable.admin.table.v1.Table_TimestampGranularity\", Table_TimestampGranularity_name, Table_TimestampGranularity_value)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto",
    "content": "// Copyright (c) 2015, Google Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nsyntax = \"proto3\";\n\npackage google.bigtable.admin.table.v1;\n\n\noption java_multiple_files = true;\noption java_outer_classname = \"BigtableTableDataProto\";\noption java_package = \"com.google.bigtable.admin.table.v1\";\n\n\n// A collection of user data indexed by row, column, and timestamp.\n// Each table is served using the resources of its parent cluster.\nmessage Table {\n  enum TimestampGranularity {\n    MILLIS = 0;\n  }\n\n  // A unique identifier of the form\n  // <cluster_name>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*\n  string name = 1;\n\n  // If this Table is in the process of being created, the Operation used to\n  // track its progress. As long as this operation is present, the Table will\n  // not accept any Table Admin or Read/Write requests.\n\n  // The column families configured for this table, mapped by column family id.\n  map<string, ColumnFamily> column_families = 3;\n\n  // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in\n  // this table. 
Timestamps not matching the granularity will be rejected.\n  // Cannot be changed once the table is created.\n  TimestampGranularity granularity = 4;\n}\n\n// A set of columns within a table which share a common configuration.\nmessage ColumnFamily {\n  // A unique identifier of the form <table_name>/families/[-_.a-zA-Z0-9]+\n  // The last segment is the same as the \"name\" field in\n  // google.bigtable.v1.Family.\n  string name = 1;\n\n  // Garbage collection expression specified by the following grammar:\n  //   GC = EXPR\n  //      | \"\" ;\n  //   EXPR = EXPR, \"||\", EXPR              (* lowest precedence *)\n  //        | EXPR, \"&&\", EXPR\n  //        | \"(\", EXPR, \")\"                (* highest precedence *)\n  //        | PROP ;\n  //   PROP = \"version() >\", NUM32\n  //        | \"age() >\", NUM64, [ UNIT ] ;\n  //   NUM32 = non-zero-digit { digit } ;    (* # NUM32 <= 2^32 - 1 *)\n  //   NUM64 = non-zero-digit { digit } ;    (* # NUM64 <= 2^63 - 1 *)\n  //   UNIT =  \"d\" | \"h\" | \"m\"  (* d=days, h=hours, m=minutes, else micros *)\n  // GC expressions can be up to 500 characters in length\n  //\n  // The different types of PROP are defined as follows:\n  //   version() - cell index, counting from most recent and starting at 1\n  //   age() - age of the cell (current time minus cell timestamp)\n  //\n  // Example: \"version() > 3 || (age() > 3d && version() > 1)\"\n  //   drop cells beyond the most recent three, and drop cells older than three\n  //   days unless they're the most recent cell in the row/column\n  //\n  // Garbage collection executes opportunistically in the background, and so\n  // it's possible for reads to return a cell even if it matches the active GC\n  // expression for its family.\n  string gc_expression = 2;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto\n// DO NOT EDIT!\n\npackage google_bigtable_admin_table_v1\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport google_bigtable_admin_table_v11 \"google.golang.org/cloud/bigtable/internal/table_data_proto\"\nimport google_protobuf \"google.golang.org/cloud/bigtable/internal/empty\"\n\nimport (\n\tcontext \"golang.org/x/net/context\"\n\tgrpc \"google.golang.org/grpc\"\n)\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ context.Context\nvar _ grpc.ClientConn\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\n\nfunc init() {\n}\n\n// Client API for BigtableTableService service\n\ntype BigtableTableServiceClient interface {\n\t// Creates a new table, to be served from a specified cluster.\n\t// The table can be created with a full set of initial column families,\n\t// specified in the request.\n\tCreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error)\n\t// Lists the names of all tables served from a specified cluster.\n\tListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error)\n\t// Gets the schema of the specified table, including its column families.\n\tGetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error)\n\t// Permanently deletes a specified table and all of its data.\n\tDeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)\n\t// Changes the name of a specified table.\n\t// Cannot be used to move tables between clusters, zones, or projects.\n\tRenameTable(ctx context.Context, in *RenameTableRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)\n\t// Creates a 
new column family within a specified table.\n\tCreateColumnFamily(ctx context.Context, in *CreateColumnFamilyRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error)\n\t// Changes the configuration of a specified column family.\n\tUpdateColumnFamily(ctx context.Context, in *google_bigtable_admin_table_v11.ColumnFamily, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error)\n\t// Permanently deletes a specified column family and all of its data.\n\tDeleteColumnFamily(ctx context.Context, in *DeleteColumnFamilyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)\n}\n\ntype bigtableTableServiceClient struct {\n\tcc *grpc.ClientConn\n}\n\nfunc NewBigtableTableServiceClient(cc *grpc.ClientConn) BigtableTableServiceClient {\n\treturn &bigtableTableServiceClient{cc}\n}\n\nfunc (c *bigtableTableServiceClient) CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error) {\n\tout := new(google_bigtable_admin_table_v11.Table)\n\terr := grpc.Invoke(ctx, \"/google.bigtable.admin.table.v1.BigtableTableService/CreateTable\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *bigtableTableServiceClient) ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) {\n\tout := new(ListTablesResponse)\n\terr := grpc.Invoke(ctx, \"/google.bigtable.admin.table.v1.BigtableTableService/ListTables\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *bigtableTableServiceClient) GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error) {\n\tout := new(google_bigtable_admin_table_v11.Table)\n\terr := grpc.Invoke(ctx, \"/google.bigtable.admin.table.v1.BigtableTableService/GetTable\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *bigtableTableServiceClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {\n\tout := new(google_protobuf.Empty)\n\terr := grpc.Invoke(ctx, \"/google.bigtable.admin.table.v1.BigtableTableService/DeleteTable\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *bigtableTableServiceClient) RenameTable(ctx context.Context, in *RenameTableRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {\n\tout := new(google_protobuf.Empty)\n\terr := grpc.Invoke(ctx, \"/google.bigtable.admin.table.v1.BigtableTableService/RenameTable\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *bigtableTableServiceClient) CreateColumnFamily(ctx context.Context, in *CreateColumnFamilyRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error) {\n\tout := new(google_bigtable_admin_table_v11.ColumnFamily)\n\terr := grpc.Invoke(ctx, \"/google.bigtable.admin.table.v1.BigtableTableService/CreateColumnFamily\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *bigtableTableServiceClient) UpdateColumnFamily(ctx context.Context, in *google_bigtable_admin_table_v11.ColumnFamily, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error) {\n\tout := new(google_bigtable_admin_table_v11.ColumnFamily)\n\terr := grpc.Invoke(ctx, \"/google.bigtable.admin.table.v1.BigtableTableService/UpdateColumnFamily\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *bigtableTableServiceClient) DeleteColumnFamily(ctx context.Context, in *DeleteColumnFamilyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {\n\tout := new(google_protobuf.Empty)\n\terr := grpc.Invoke(ctx, 
\"/google.bigtable.admin.table.v1.BigtableTableService/DeleteColumnFamily\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\n// Server API for BigtableTableService service\n\ntype BigtableTableServiceServer interface {\n\t// Creates a new table, to be served from a specified cluster.\n\t// The table can be created with a full set of initial column families,\n\t// specified in the request.\n\tCreateTable(context.Context, *CreateTableRequest) (*google_bigtable_admin_table_v11.Table, error)\n\t// Lists the names of all tables served from a specified cluster.\n\tListTables(context.Context, *ListTablesRequest) (*ListTablesResponse, error)\n\t// Gets the schema of the specified table, including its column families.\n\tGetTable(context.Context, *GetTableRequest) (*google_bigtable_admin_table_v11.Table, error)\n\t// Permanently deletes a specified table and all of its data.\n\tDeleteTable(context.Context, *DeleteTableRequest) (*google_protobuf.Empty, error)\n\t// Changes the name of a specified table.\n\t// Cannot be used to move tables between clusters, zones, or projects.\n\tRenameTable(context.Context, *RenameTableRequest) (*google_protobuf.Empty, error)\n\t// Creates a new column family within a specified table.\n\tCreateColumnFamily(context.Context, *CreateColumnFamilyRequest) (*google_bigtable_admin_table_v11.ColumnFamily, error)\n\t// Changes the configuration of a specified column family.\n\tUpdateColumnFamily(context.Context, *google_bigtable_admin_table_v11.ColumnFamily) (*google_bigtable_admin_table_v11.ColumnFamily, error)\n\t// Permanently deletes a specified column family and all of its data.\n\tDeleteColumnFamily(context.Context, *DeleteColumnFamilyRequest) (*google_protobuf.Empty, error)\n}\n\nfunc RegisterBigtableTableServiceServer(s *grpc.Server, srv BigtableTableServiceServer) {\n\ts.RegisterService(&_BigtableTableService_serviceDesc, srv)\n}\n\nfunc _BigtableTableService_CreateTable_Handler(srv interface{}, 
ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(CreateTableRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableTableServiceServer).CreateTable(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _BigtableTableService_ListTables_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(ListTablesRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableTableServiceServer).ListTables(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _BigtableTableService_GetTable_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(GetTableRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableTableServiceServer).GetTable(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _BigtableTableService_DeleteTable_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(DeleteTableRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableTableServiceServer).DeleteTable(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _BigtableTableService_RenameTable_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(RenameTableRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableTableServiceServer).RenameTable(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _BigtableTableService_CreateColumnFamily_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, 
error) {\n\tin := new(CreateColumnFamilyRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableTableServiceServer).CreateColumnFamily(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _BigtableTableService_UpdateColumnFamily_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(google_bigtable_admin_table_v11.ColumnFamily)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableTableServiceServer).UpdateColumnFamily(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _BigtableTableService_DeleteColumnFamily_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(DeleteColumnFamilyRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(BigtableTableServiceServer).DeleteColumnFamily(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nvar _BigtableTableService_serviceDesc = grpc.ServiceDesc{\n\tServiceName: \"google.bigtable.admin.table.v1.BigtableTableService\",\n\tHandlerType: (*BigtableTableServiceServer)(nil),\n\tMethods: []grpc.MethodDesc{\n\t\t{\n\t\t\tMethodName: \"CreateTable\",\n\t\t\tHandler:    _BigtableTableService_CreateTable_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"ListTables\",\n\t\t\tHandler:    _BigtableTableService_ListTables_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"GetTable\",\n\t\t\tHandler:    _BigtableTableService_GetTable_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"DeleteTable\",\n\t\t\tHandler:    _BigtableTableService_DeleteTable_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"RenameTable\",\n\t\t\tHandler:    _BigtableTableService_RenameTable_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"CreateColumnFamily\",\n\t\t\tHandler:    
_BigtableTableService_CreateColumnFamily_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"UpdateColumnFamily\",\n\t\t\tHandler:    _BigtableTableService_UpdateColumnFamily_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"DeleteColumnFamily\",\n\t\t\tHandler:    _BigtableTableService_DeleteColumnFamily_Handler,\n\t\t},\n\t},\n\tStreams: []grpc.StreamDesc{},\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto",
    "content": "// Copyright (c) 2015, Google Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nsyntax = \"proto3\";\n\npackage google.bigtable.admin.table.v1;\n\nimport \"google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto\";\nimport \"google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto\";\nimport \"google.golang.org/cloud/bigtable/internal/empty/empty.proto\";\n\noption java_generic_services = true;\noption java_multiple_files = true;\noption java_outer_classname = \"BigtableTableServicesProto\";\noption java_package = \"com.google.bigtable.admin.table.v1\";\n\n\n// Service for creating, configuring, and deleting Cloud Bigtable tables.\n// Provides access to the table schemas only, not the data stored within the tables.\nservice BigtableTableService {\n  // Creates a new table, to be served from a specified cluster.\n  // The table can be created with a full set of initial column families,\n  // specified in the request.\n  rpc CreateTable(CreateTableRequest) returns (Table) {\n  }\n\n  // Lists the names of all tables served from a specified cluster.\n  rpc ListTables(ListTablesRequest) returns (ListTablesResponse) {\n  }\n\n  // Gets the schema of the specified table, including its column families.\n  rpc GetTable(GetTableRequest) returns (Table) {\n  }\n\n  // Permanently deletes a specified table and all of its data.\n  rpc DeleteTable(DeleteTableRequest) 
returns (google.protobuf.Empty) {\n  }\n\n  // Changes the name of a specified table.\n  // Cannot be used to move tables between clusters, zones, or projects.\n  rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) {\n  }\n\n  // Creates a new column family within a specified table.\n  rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) {\n  }\n\n  // Changes the configuration of a specified column family.\n  rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) {\n  }\n\n  // Permanently deletes a specified column family and all of its data.\n  rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) {\n  }\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto\n// DO NOT EDIT!\n\n/*\nPackage google_bigtable_admin_table_v1 is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgoogle.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto\n\tgoogle.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto\n\nIt has these top-level messages:\n\tCreateTableRequest\n\tListTablesRequest\n\tListTablesResponse\n\tGetTableRequest\n\tDeleteTableRequest\n\tRenameTableRequest\n\tCreateColumnFamilyRequest\n\tDeleteColumnFamilyRequest\n*/\npackage google_bigtable_admin_table_v1\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport google_bigtable_admin_table_v11 \"google.golang.org/cloud/bigtable/internal/table_data_proto\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\n\ntype CreateTableRequest struct {\n\t// The unique name of the cluster in which to create the new table.\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\t// The name by which the new table should be referred to within the cluster,\n\t// e.g. \"foobar\" rather than \"<cluster_name>/tables/foobar\".\n\tTableId string `protobuf:\"bytes,2,opt,name=table_id\" json:\"table_id,omitempty\"`\n\t// The Table to create. 
The `name` field of the Table and all of its\n\t// ColumnFamilies must be left blank, and will be populated in the response.\n\tTable *google_bigtable_admin_table_v11.Table `protobuf:\"bytes,3,opt,name=table\" json:\"table,omitempty\"`\n}\n\nfunc (m *CreateTableRequest) Reset()         { *m = CreateTableRequest{} }\nfunc (m *CreateTableRequest) String() string { return proto.CompactTextString(m) }\nfunc (*CreateTableRequest) ProtoMessage()    {}\n\nfunc (m *CreateTableRequest) GetTable() *google_bigtable_admin_table_v11.Table {\n\tif m != nil {\n\t\treturn m.Table\n\t}\n\treturn nil\n}\n\ntype ListTablesRequest struct {\n\t// The unique name of the cluster for which tables should be listed.\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n}\n\nfunc (m *ListTablesRequest) Reset()         { *m = ListTablesRequest{} }\nfunc (m *ListTablesRequest) String() string { return proto.CompactTextString(m) }\nfunc (*ListTablesRequest) ProtoMessage()    {}\n\ntype ListTablesResponse struct {\n\t// The tables present in the requested cluster.\n\t// At present, only the names of the tables are populated.\n\tTables []*google_bigtable_admin_table_v11.Table `protobuf:\"bytes,1,rep,name=tables\" json:\"tables,omitempty\"`\n}\n\nfunc (m *ListTablesResponse) Reset()         { *m = ListTablesResponse{} }\nfunc (m *ListTablesResponse) String() string { return proto.CompactTextString(m) }\nfunc (*ListTablesResponse) ProtoMessage()    {}\n\nfunc (m *ListTablesResponse) GetTables() []*google_bigtable_admin_table_v11.Table {\n\tif m != nil {\n\t\treturn m.Tables\n\t}\n\treturn nil\n}\n\ntype GetTableRequest struct {\n\t// The unique name of the requested table.\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n}\n\nfunc (m *GetTableRequest) Reset()         { *m = GetTableRequest{} }\nfunc (m *GetTableRequest) String() string { return proto.CompactTextString(m) }\nfunc (*GetTableRequest) ProtoMessage()    {}\n\ntype DeleteTableRequest 
struct {\n\t// The unique name of the table to be deleted.\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n}\n\nfunc (m *DeleteTableRequest) Reset()         { *m = DeleteTableRequest{} }\nfunc (m *DeleteTableRequest) String() string { return proto.CompactTextString(m) }\nfunc (*DeleteTableRequest) ProtoMessage()    {}\n\ntype RenameTableRequest struct {\n\t// The current unique name of the table.\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\t// The new name by which the table should be referred to within its containing\n\t// cluster, e.g. \"foobar\" rather than \"<cluster_name>/tables/foobar\".\n\tNewId string `protobuf:\"bytes,2,opt,name=new_id\" json:\"new_id,omitempty\"`\n}\n\nfunc (m *RenameTableRequest) Reset()         { *m = RenameTableRequest{} }\nfunc (m *RenameTableRequest) String() string { return proto.CompactTextString(m) }\nfunc (*RenameTableRequest) ProtoMessage()    {}\n\ntype CreateColumnFamilyRequest struct {\n\t// The unique name of the table in which to create the new column family.\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\t// The name by which the new column family should be referred to within the\n\t// table, e.g. \"foobar\" rather than \"<table_name>/columnFamilies/foobar\".\n\tColumnFamilyId string `protobuf:\"bytes,2,opt,name=column_family_id\" json:\"column_family_id,omitempty\"`\n\t// The column family to create. 
The `name` field must be left blank.\n\tColumnFamily *google_bigtable_admin_table_v11.ColumnFamily `protobuf:\"bytes,3,opt,name=column_family\" json:\"column_family,omitempty\"`\n}\n\nfunc (m *CreateColumnFamilyRequest) Reset()         { *m = CreateColumnFamilyRequest{} }\nfunc (m *CreateColumnFamilyRequest) String() string { return proto.CompactTextString(m) }\nfunc (*CreateColumnFamilyRequest) ProtoMessage()    {}\n\nfunc (m *CreateColumnFamilyRequest) GetColumnFamily() *google_bigtable_admin_table_v11.ColumnFamily {\n\tif m != nil {\n\t\treturn m.ColumnFamily\n\t}\n\treturn nil\n}\n\ntype DeleteColumnFamilyRequest struct {\n\t// The unique name of the column family to be deleted.\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n}\n\nfunc (m *DeleteColumnFamilyRequest) Reset()         { *m = DeleteColumnFamilyRequest{} }\nfunc (m *DeleteColumnFamilyRequest) String() string { return proto.CompactTextString(m) }\nfunc (*DeleteColumnFamilyRequest) ProtoMessage()    {}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto",
    "content": "// Copyright (c) 2015, Google Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nsyntax = \"proto3\";\n\npackage google.bigtable.admin.table.v1;\n\nimport \"google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto\";\n\noption java_multiple_files = true;\noption java_outer_classname = \"BigtableTableServiceMessagesProto\";\noption java_package = \"com.google.bigtable.admin.table.v1\";\n\n\nmessage CreateTableRequest {\n  // The unique name of the cluster in which to create the new table.\n  string name = 1;\n\n  // The name by which the new table should be referred to within the cluster,\n  // e.g. \"foobar\" rather than \"<cluster_name>/tables/foobar\".\n  string table_id = 2;\n\n  // The Table to create. 
The `name` field of the Table and all of its\n  // ColumnFamilies must be left blank, and will be populated in the response.\n  Table table = 3;\n}\n\nmessage ListTablesRequest {\n  // The unique name of the cluster for which tables should be listed.\n  string name = 1;\n}\n\nmessage ListTablesResponse {\n  // The tables present in the requested cluster.\n  // At present, only the names of the tables are populated.\n  repeated Table tables = 1;\n}\n\nmessage GetTableRequest {\n  // The unique name of the requested table.\n  string name = 1;\n}\n\nmessage DeleteTableRequest {\n  // The unique name of the table to be deleted.\n  string name = 1;\n}\n\nmessage RenameTableRequest {\n  // The current unique name of the table.\n  string name = 1;\n\n  // The new name by which the table should be referred to within its containing\n  // cluster, e.g. \"foobar\" rather than \"<cluster_name>/tables/foobar\".\n  string new_id = 2;\n}\n\nmessage CreateColumnFamilyRequest {\n  // The unique name of the table in which to create the new column family.\n  string name = 1;\n\n  // The name by which the new column family should be referred to within the\n  // table, e.g. \"foobar\" rather than \"<table_name>/columnFamilies/foobar\".\n  string column_family_id = 2;\n\n  // The column family to create. The `name` field must be left blank.\n  ColumnFamily column_family = 3;\n}\n\nmessage DeleteColumnFamilyRequest {\n  // The unique name of the column family to be deleted.\n  string name = 1;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/bigtable/sample/search.go",
    "content": "/*\nCopyright 2015 Google Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// This is a sample web server that uses Cloud Bigtable as the storage layer\n// for a simple document-storage and full-text-search service.\n// It has three functions:\n// - Add a document.  This adds the content of a user-supplied document to the\n//   Bigtable, and adds references to the document to an index in the Bigtable.\n//   The document is indexed under each unique word in the document.\n// - Search the index.  This returns documents containing each word in a user\n//   query, with snippets and links to view the whole document.\n// - Clear the table.  
This deletes and recreates the Bigtable,\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"html/template\"\n\t\"io\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\t\"unicode\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/cloud/bigtable\"\n)\n\nvar (\n\tproject   = flag.String(\"project\", \"\", \"The name of the project.\")\n\tzone      = flag.String(\"zone\", \"\", \"The zone of the project.\")\n\tcluster   = flag.String(\"cluster\", \"\", \"The name of the Cloud Bigtable cluster.\")\n\ttableName = flag.String(\"table\", \"docindex\", \"The name of the table containing the documents and index.\")\n\tcredFile  = flag.String(\"creds\", \"\", \"File containing credentials\")\n\trebuild   = flag.Bool(\"rebuild\", false, \"Rebuild the table from scratch on startup.\")\n\n\tclient      *bigtable.Client\n\tadminClient *bigtable.AdminClient\n\ttable       *bigtable.Table\n\n\taddTemplate = template.Must(template.New(\"\").Parse(`<html><body>\nAdded {{.Title}}\n</body></html>`))\n\n\tcontentTemplate = template.Must(template.New(\"\").Parse(`<html><body>\n<b>{{.Title}}</b><br><br>\n{{.Content}}\n</body></html>`))\n\n\tsearchTemplate = template.Must(template.New(\"\").Parse(`<html><body>\nResults for <b>{{.Query}}</b>:<br><br>\n{{range .Results}}\n<a href=\"/content?name={{.Title}}\">{{.Title}}</a><br>\n<i>{{.Snippet}}</i><br><br>\n{{end}}\n</body></html>`))\n)\n\nconst (\n\t// prototypeTableName is an existing table containing some documents.\n\t// Rebuilding a table will populate it with the data from this table.\n\tprototypeTableName  = \"shakespearetemplate\"\n\tindexColumnFamily   = \"i\"\n\tcontentColumnFamily = \"c\"\n\tmainPage            = `\n\t<html>\n\t\t<head>\n\t\t\t<title>Document Search</title>\n\t\t</head>\n\t\t<body>\n\t\t\tSearch for documents:\n\t\t\t<form action=\"/search\" method=\"post\">\n\t\t\t\t<div><input type=\"text\" name=\"q\" size=80></div>\n\t\t\t\t<div><input type=\"submit\" 
value=\"Search\"></div>\n\t\t\t</form>\n\n\t\t\tAdd a document:\n\t\t\t<form action=\"/add\" method=\"post\">\n\t\t\t\tDocument name:\n\t\t\t\t<div><textarea name=\"name\" rows=\"1\" cols=\"80\"></textarea></div>\n\t\t\t\tDocument text:\n\t\t\t\t<div><textarea name=\"content\" rows=\"20\" cols=\"80\"></textarea></div>\n\t\t\t\t<div><input type=\"submit\" value=\"Submit\"></div>\n\t\t\t</form>\n\n\t\t\tRebuild table:\n\t\t\t<form action=\"/clearindex\" method=\"post\">\n\t\t\t\t<div><input type=\"submit\" value=\"Rebuild\"></div>\n\t\t\t</form>\n\t\t</body>\n\t</html>\n\t`\n)\n\nfunc main() {\n\tflag.Parse()\n\n\tif *tableName == prototypeTableName {\n\t\tlog.Fatal(\"Can't use \" + prototypeTableName + \" as your table.\")\n\t}\n\n\t// Let the library get credentials from file.\n\tos.Setenv(\"GOOGLE_APPLICATION_CREDENTIALS\", *credFile)\n\n\t// Make an admin client.\n\tvar err error\n\tif adminClient, err = bigtable.NewAdminClient(context.Background(), *project, *zone, *cluster); err != nil {\n\t\tlog.Fatal(\"Bigtable NewAdminClient:\", err)\n\t}\n\n\t// Make a regular client.\n\tclient, err = bigtable.NewClient(context.Background(), *project, *zone, *cluster)\n\tif err != nil {\n\t\tlog.Fatal(\"Bigtable NewClient:\", err)\n\t}\n\n\t// Open the table.\n\ttable = client.Open(*tableName)\n\n\t// Rebuild the table if the command-line flag is set.\n\tif *rebuild {\n\t\tif err := rebuildTable(); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\t// Set up HTML handlers, and start the web server.\n\thttp.HandleFunc(\"/search\", handleSearch)\n\thttp.HandleFunc(\"/content\", handleContent)\n\thttp.HandleFunc(\"/add\", handleAddDoc)\n\thttp.HandleFunc(\"/clearindex\", handleClear)\n\thttp.HandleFunc(\"/\", handleMain)\n\tlog.Fatal(http.ListenAndServe(\":8080\", nil))\n}\n\n// handleMain outputs the home page, containing a search box, an \"add document\" box, and \"clear table\" button.\nfunc handleMain(w http.ResponseWriter, r *http.Request) {\n\tio.WriteString(w, 
mainPage)\n}\n\n// tokenize splits a string into tokens.\n// This is very simple, it's not a good tokenization function.\nfunc tokenize(s string) []string {\n\twordMap := make(map[string]bool)\n\tf := strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) })\n\tfor _, word := range f {\n\t\tword = strings.ToLower(word)\n\t\twordMap[word] = true\n\t}\n\twords := make([]string, 0, len(wordMap))\n\tfor word := range wordMap {\n\t\twords = append(words, word)\n\t}\n\treturn words\n}\n\n// handleContent fetches the content of a document from the Bigtable and returns it.\nfunc handleContent(w http.ResponseWriter, r *http.Request) {\n\tctx, _ := context.WithTimeout(context.Background(), 10*time.Second)\n\tname := r.FormValue(\"name\")\n\tif len(name) == 0 {\n\t\thttp.Error(w, \"No document name supplied.\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\trow, err := table.ReadRow(ctx, name)\n\tif err != nil {\n\t\thttp.Error(w, \"Error reading content: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tcontent := row[contentColumnFamily]\n\tif len(content) == 0 {\n\t\thttp.Error(w, \"Document not found.\", http.StatusNotFound)\n\t\treturn\n\t}\n\tvar buf bytes.Buffer\n\tif err := contentTemplate.ExecuteTemplate(&buf, \"\", struct{ Title, Content string }{name, string(content[0].Value)}); err != nil {\n\t\thttp.Error(w, \"Error executing HTML template: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tio.Copy(w, &buf)\n}\n\n// handleSearch responds to search queries, returning links and snippets for matching documents.\nfunc handleSearch(w http.ResponseWriter, r *http.Request) {\n\tctx, _ := context.WithTimeout(context.Background(), 10*time.Second)\n\tquery := r.FormValue(\"q\")\n\t// Split the query into words.\n\twords := tokenize(query)\n\tif len(words) == 0 {\n\t\thttp.Error(w, \"Empty query.\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// readRows reads from many rows concurrently.\n\treadRows := func(rows []string) 
([]bigtable.Row, error) {\n\t\tresults := make([]bigtable.Row, len(rows))\n\t\terrors := make([]error, len(rows))\n\t\tvar wg sync.WaitGroup\n\t\tfor i, row := range rows {\n\t\t\twg.Add(1)\n\t\t\tgo func(i int, row string) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tresults[i], errors[i] = table.ReadRow(ctx, row)\n\t\t\t}(i, row)\n\t\t}\n\t\twg.Wait()\n\t\tfor _, err := range errors {\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\treturn results, nil\n\t}\n\n\t// For each query word, get the list of documents containing it.\n\tresults, err := readRows(words)\n\tif err != nil {\n\t\thttp.Error(w, \"Error reading index: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Count how many of the query words each result contained.\n\thits := make(map[string]int)\n\tfor _, r := range results {\n\t\tfor _, r := range r[indexColumnFamily] {\n\t\t\thits[r.Column]++\n\t\t}\n\t}\n\n\t// Build a slice of all the documents that matched every query word.\n\tvar matches []string\n\tfor doc, count := range hits {\n\t\tif count == len(words) {\n\t\t\tmatches = append(matches, doc[len(indexColumnFamily+\":\"):])\n\t\t}\n\t}\n\n\t// Fetch the content of those documents from the Bigtable.\n\tcontent, err := readRows(matches)\n\tif err != nil {\n\t\thttp.Error(w, \"Error reading results: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ttype result struct{ Title, Snippet string }\n\tdata := struct {\n\t\tQuery   string\n\t\tResults []result\n\t}{query, nil}\n\n\t// Output links and snippets.\n\tfor i, doc := range matches {\n\t\tvar text string\n\t\tc := content[i][contentColumnFamily]\n\t\tif len(c) > 0 {\n\t\t\ttext = string(c[0].Value)\n\t\t}\n\t\tif len(text) > 100 {\n\t\t\ttext = text[:100] + \"...\"\n\t\t}\n\t\tdata.Results = append(data.Results, result{doc, text})\n\t}\n\tvar buf bytes.Buffer\n\tif err := searchTemplate.ExecuteTemplate(&buf, \"\", data); err != nil {\n\t\thttp.Error(w, \"Error executing HTML template: 
\"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tio.Copy(w, &buf)\n}\n\n// handleAddDoc adds a document to the index.\nfunc handleAddDoc(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, \"POST requests only\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tctx, _ := context.WithTimeout(context.Background(), time.Minute)\n\n\tname := r.FormValue(\"name\")\n\tif len(name) == 0 {\n\t\thttp.Error(w, \"Empty document name!\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tcontent := r.FormValue(\"content\")\n\tif len(content) == 0 {\n\t\thttp.Error(w, \"Empty document content!\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar (\n\t\twriteErr error          // Set if any write fails.\n\t\tmu       sync.Mutex     // Protects writeErr\n\t\twg       sync.WaitGroup // Used to wait for all writes to finish.\n\t)\n\n\t// writeOneColumn writes one column in one row, updates err if there is an error,\n\t// and signals wg that one operation has finished.\n\twriteOneColumn := func(row, family, column, value string, ts bigtable.Timestamp) {\n\t\tmut := bigtable.NewMutation()\n\t\tmut.Set(family, column, ts, []byte(value))\n\t\terr := table.Apply(ctx, row, mut)\n\t\tif err != nil {\n\t\t\tmu.Lock()\n\t\t\twriteErr = err\n\t\t\tmu.Unlock()\n\t\t}\n\t}\n\n\t// Start a write to store the document content.\n\twg.Add(1)\n\tgo func() {\n\t\twriteOneColumn(name, contentColumnFamily, \"\", content, bigtable.Now())\n\t\twg.Done()\n\t}()\n\n\t// Start writes to store the document name in the index for each word in the document.\n\twords := tokenize(content)\n\tfor _, word := range words {\n\t\tvar (\n\t\t\trow    = word\n\t\t\tfamily = indexColumnFamily\n\t\t\tcolumn = name\n\t\t\tvalue  = \"\"\n\t\t\tts     = bigtable.Now()\n\t\t)\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\t// TODO: should use a semaphore to limit the number of concurrent writes.\n\t\t\twriteOneColumn(row, family, column, value, 
ts)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n\tif writeErr != nil {\n\t\thttp.Error(w, \"Error writing to Bigtable: \"+writeErr.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tvar buf bytes.Buffer\n\tif err := addTemplate.ExecuteTemplate(&buf, \"\", struct{ Title string }{name}); err != nil {\n\t\thttp.Error(w, \"Error executing HTML template: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tio.Copy(w, &buf)\n}\n\n// rebuildTable deletes the table if it exists, then creates the table, with the index column family.\nfunc rebuildTable() error {\n\tctx, _ := context.WithTimeout(context.Background(), 5*time.Minute)\n\tadminClient.DeleteTable(ctx, *tableName)\n\tif err := adminClient.CreateTable(ctx, *tableName); err != nil {\n\t\treturn fmt.Errorf(\"CreateTable: %v\", err)\n\t}\n\ttime.Sleep(20 * time.Second)\n\tif err := adminClient.CreateColumnFamily(ctx, *tableName, indexColumnFamily); err != nil {\n\t\treturn fmt.Errorf(\"CreateColumnFamily: %v\", err)\n\t}\n\tif err := adminClient.CreateColumnFamily(ctx, *tableName, contentColumnFamily); err != nil {\n\t\treturn fmt.Errorf(\"CreateColumnFamily: %v\", err)\n\t}\n\n\t// Open the prototype table.  
It contains a number of documents to get started with.\n\tprototypeTable := client.Open(prototypeTableName)\n\n\tvar (\n\t\twriteErr error          // Set if any write fails.\n\t\tmu       sync.Mutex     // Protects writeErr\n\t\twg       sync.WaitGroup // Used to wait for all writes to finish.\n\t)\n\tcopyRowToTable := func(row bigtable.Row) bool {\n\t\tmu.Lock()\n\t\tfailed := writeErr != nil\n\t\tmu.Unlock()\n\t\tif failed {\n\t\t\treturn false\n\t\t}\n\t\tmut := bigtable.NewMutation()\n\t\tfor family, items := range row {\n\t\t\tfor _, item := range items {\n\t\t\t\t// Get the column name, excluding the column family name and ':' character.\n\t\t\t\tcolumnWithoutFamily := item.Column[len(family)+1:]\n\t\t\t\tmut.Set(family, columnWithoutFamily, bigtable.Now(), item.Value)\n\t\t\t}\n\t\t}\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\t// TODO: should use a semaphore to limit the number of concurrent writes.\n\t\t\tif err := table.Apply(ctx, row.Key(), mut); err != nil {\n\t\t\t\tmu.Lock()\n\t\t\t\twriteErr = err\n\t\t\t\tmu.Unlock()\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t\treturn true\n\t}\n\n\t// Create a filter that only accepts the column families we're interested in.\n\tfilter := bigtable.FamilyFilter(indexColumnFamily + \"|\" + contentColumnFamily)\n\t// Read every row from prototypeTable, and call copyRowToTable to copy it to our table.\n\terr := prototypeTable.ReadRows(ctx, bigtable.InfiniteRange(\"\"), copyRowToTable, bigtable.RowFilter(filter))\n\twg.Wait()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn writeErr\n}\n\n// handleClear calls rebuildTable\nfunc handleClear(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, \"POST requests only\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tif err := rebuildTable(); err != nil {\n\t\thttp.Error(w, \"Failed to rebuild index: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfmt.Fprint(w, \"Rebuilt index.\\n\")\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/cloud.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package cloud contains Google Cloud Platform APIs related types\n// and common functions.\npackage cloud\n\nimport (\n\t\"net/http\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/cloud/internal\"\n)\n\n// NewContext returns a new context that uses the provided http.Client.\n// Provided http.Client is responsible to authorize and authenticate\n// the requests made to the Google Cloud APIs.\n// It mutates the client's original Transport to append the cloud\n// package's user-agent to the outgoing requests.\n// You can obtain the project ID from the Google Developers Console,\n// https://console.developers.google.com.\nfunc NewContext(projID string, c *http.Client) context.Context {\n\tif c == nil {\n\t\tpanic(\"invalid nil *http.Client passed to NewContext\")\n\t}\n\treturn WithContext(context.Background(), projID, c)\n}\n\n// WithContext returns a new context in a similar way NewContext does,\n// but initiates the new context with the specified parent.\nfunc WithContext(parent context.Context, projID string, c *http.Client) context.Context {\n\t// TODO(bradfitz): delete internal.Transport. 
It's too wrappy for what it does.\n\t// Do User-Agent some other way.\n\tif _, ok := c.Transport.(*internal.Transport); !ok {\n\t\tc.Transport = &internal.Transport{Base: c.Transport}\n\t}\n\treturn internal.WithContext(parent, projID, c)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/compute/metadata/metadata.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package metadata provides access to Google Compute Engine (GCE)\n// metadata and API service accounts.\n//\n// This package is a wrapper around the GCE metadata service,\n// as documented at https://developers.google.com/compute/docs/metadata.\npackage metadata\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"google.golang.org/cloud/internal\"\n)\n\ntype cachedValue struct {\n\tk    string\n\ttrim bool\n\tmu   sync.Mutex\n\tv    string\n}\n\nvar (\n\tprojID  = &cachedValue{k: \"project/project-id\", trim: true}\n\tprojNum = &cachedValue{k: \"project/numeric-project-id\", trim: true}\n\tinstID  = &cachedValue{k: \"instance/id\", trim: true}\n)\n\nvar metaClient = &http.Client{\n\tTransport: &internal.Transport{\n\t\tBase: &http.Transport{\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout:   750 * time.Millisecond,\n\t\t\t\tKeepAlive: 30 * time.Second,\n\t\t\t}).Dial,\n\t\t\tResponseHeaderTimeout: 750 * time.Millisecond,\n\t\t},\n\t},\n}\n\n// NotDefinedError is returned when requested metadata is not defined.\n//\n// The underlying string is the suffix after \"/computeMetadata/v1/\".\n//\n// This error is not returned if the value is defined to be the empty\n// string.\ntype NotDefinedError string\n\nfunc (suffix NotDefinedError) Error() 
string {\n\treturn fmt.Sprintf(\"metadata: GCE metadata %q not defined\", string(suffix))\n}\n\n// Get returns a value from the metadata service.\n// The suffix is appended to \"http://${GCE_METADATA_HOST}/computeMetadata/v1/\".\n//\n// If the GCE_METADATA_HOST environment variable is not defined, a default of\n// 169.254.169.254 will be used instead.\n//\n// If the requested metadata is not defined, the returned error will\n// be of type NotDefinedError.\nfunc Get(suffix string) (string, error) {\n\t// Using a fixed IP makes it very difficult to spoof the metadata service in\n\t// a container, which is an important use-case for local testing of cloud\n\t// deployments. To enable spoofing of the metadata service, the environment\n\t// variable GCE_METADATA_HOST is first inspected to decide where metadata\n\t// requests shall go.\n\thost := os.Getenv(\"GCE_METADATA_HOST\")\n\tif host == \"\" {\n\t\t// Using 169.254.169.254 instead of \"metadata\" here because Go\n\t\t// binaries built with the \"netgo\" tag and without cgo won't\n\t\t// know the search suffix for \"metadata\" is\n\t\t// \".google.internal\", and this IP address is documented as\n\t\t// being stable anyway.\n\t\thost = \"169.254.169.254\"\n\t}\n\turl := \"http://\" + host + \"/computeMetadata/v1/\" + suffix\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\treq.Header.Set(\"Metadata-Flavor\", \"Google\")\n\tres, err := metaClient.Do(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode == http.StatusNotFound {\n\t\treturn \"\", NotDefinedError(suffix)\n\t}\n\tif res.StatusCode != 200 {\n\t\treturn \"\", fmt.Errorf(\"status code %d trying to fetch %s\", res.StatusCode, url)\n\t}\n\tall, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(all), nil\n}\n\nfunc getTrimmed(suffix string) (s string, err error) {\n\ts, err = Get(suffix)\n\ts = strings.TrimSpace(s)\n\treturn\n}\n\nfunc (c *cachedValue) get() (v string, err 
error) {\n\tdefer c.mu.Unlock()\n\tc.mu.Lock()\n\tif c.v != \"\" {\n\t\treturn c.v, nil\n\t}\n\tif c.trim {\n\t\tv, err = getTrimmed(c.k)\n\t} else {\n\t\tv, err = Get(c.k)\n\t}\n\tif err == nil {\n\t\tc.v = v\n\t}\n\treturn\n}\n\nvar onGCE struct {\n\tsync.Mutex\n\tset bool\n\tv   bool\n}\n\n// OnGCE reports whether this process is running on Google Compute Engine.\nfunc OnGCE() bool {\n\tdefer onGCE.Unlock()\n\tonGCE.Lock()\n\tif onGCE.set {\n\t\treturn onGCE.v\n\t}\n\tonGCE.set = true\n\n\t// We use the DNS name of the metadata service here instead of the IP address\n\t// because we expect that to fail faster in the not-on-GCE case.\n\tres, err := metaClient.Get(\"http://metadata.google.internal\")\n\tif err != nil {\n\t\treturn false\n\t}\n\tonGCE.v = res.Header.Get(\"Metadata-Flavor\") == \"Google\"\n\treturn onGCE.v\n}\n\n// ProjectID returns the current instance's project ID string.\nfunc ProjectID() (string, error) { return projID.get() }\n\n// NumericProjectID returns the current instance's numeric project ID.\nfunc NumericProjectID() (string, error) { return projNum.get() }\n\n// InternalIP returns the instance's primary internal IP address.\nfunc InternalIP() (string, error) {\n\treturn getTrimmed(\"instance/network-interfaces/0/ip\")\n}\n\n// ExternalIP returns the instance's primary external (public) IP address.\nfunc ExternalIP() (string, error) {\n\treturn getTrimmed(\"instance/network-interfaces/0/access-configs/0/external-ip\")\n}\n\n// Hostname returns the instance's hostname. 
This will be of the form\n// \"<instanceID>.c.<projID>.internal\".\nfunc Hostname() (string, error) {\n\treturn getTrimmed(\"instance/hostname\")\n}\n\n// InstanceTags returns the list of user-defined instance tags,\n// assigned when initially creating a GCE instance.\nfunc InstanceTags() ([]string, error) {\n\tvar s []string\n\tj, err := Get(\"instance/tags\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {\n\t\treturn nil, err\n\t}\n\treturn s, nil\n}\n\n// InstanceID returns the current VM's numeric instance ID.\nfunc InstanceID() (string, error) {\n\treturn instID.get()\n}\n\n// InstanceName returns the current VM's instance ID string.\nfunc InstanceName() (string, error) {\n\thost, err := Hostname()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.Split(host, \".\")[0], nil\n}\n\n// Zone returns the current VM's zone, such as \"us-central1-b\".\nfunc Zone() (string, error) {\n\tzone, err := getTrimmed(\"instance/zone\")\n\t// zone is of the form \"projects/<projNum>/zones/<zoneName>\".\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn zone[strings.LastIndex(zone, \"/\")+1:], nil\n}\n\n// InstanceAttributes returns the list of user-defined attributes,\n// assigned when initially creating a GCE VM instance. The value of an\n// attribute can be obtained with InstanceAttributeValue.\nfunc InstanceAttributes() ([]string, error) { return lines(\"instance/attributes/\") }\n\n// ProjectAttributes returns the list of user-defined attributes\n// applying to the project as a whole, not just this VM.  
The value of\n// an attribute can be obtained with ProjectAttributeValue.\nfunc ProjectAttributes() ([]string, error) { return lines(\"project/attributes/\") }\n\nfunc lines(suffix string) ([]string, error) {\n\tj, err := Get(suffix)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts := strings.Split(strings.TrimSpace(j), \"\\n\")\n\tfor i := range s {\n\t\ts[i] = strings.TrimSpace(s[i])\n\t}\n\treturn s, nil\n}\n\n// InstanceAttributeValue returns the value of the provided VM\n// instance attribute.\n//\n// If the requested attribute is not defined, the returned error will\n// be of type NotDefinedError.\n//\n// InstanceAttributeValue may return (\"\", nil) if the attribute was\n// defined to be the empty string.\nfunc InstanceAttributeValue(attr string) (string, error) {\n\treturn Get(\"instance/attributes/\" + attr)\n}\n\n// ProjectAttributeValue returns the value of the provided\n// project attribute.\n//\n// If the requested attribute is not defined, the returned error will\n// be of type NotDefinedError.\n//\n// ProjectAttributeValue may return (\"\", nil) if the attribute was\n// defined to be the empty string.\nfunc ProjectAttributeValue(attr string) (string, error) {\n\treturn Get(\"project/attributes/\" + attr)\n}\n\n// Scopes returns the service account scopes for the given account.\n// The account may be empty or the string \"default\" to use the instance's\n// main account.\nfunc Scopes(serviceAccount string) ([]string, error) {\n\tif serviceAccount == \"\" {\n\t\tserviceAccount = \"default\"\n\t}\n\treturn lines(\"instance/service-accounts/\" + serviceAccount + \"/scopes\")\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/container/container.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package container contains a Google Container Engine client.\n//\n// For more information about the API,\n// see https://cloud.google.com/container-engine/docs\npackage container\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\traw \"google.golang.org/api/container/v1beta1\"\n\t\"google.golang.org/cloud/internal\"\n)\n\ntype Type string\n\nvar (\n\tTypeCreate Type = Type(\"createCluster\")\n\tTypeDelete Type = Type(\"deleteCluster\")\n)\n\ntype Status string\n\nvar (\n\tDone         = Status(\"done\")\n\tPending      = Status(\"pending\")\n\tRunning      = Status(\"running\")\n\tError        = Status(\"error\")\n\tProvisioning = Status(\"provisioning\")\n\tStopping     = Status(\"stopping\")\n)\n\n// Resource is a Google Container Engine cluster resource.\ntype Resource struct {\n\t// Name is the name of this cluster. The name must be unique\n\t// within this project and zone, and can be up to 40 characters.\n\tName string\n\n\t// Description is the description of the cluster. Optional.\n\tDescription string\n\n\t// Zone is the Google Compute Engine zone in which the cluster resides.\n\tZone string\n\n\t// Status is the current status of the cluster. 
It could either be\n\t// StatusError, StatusProvisioning, StatusRunning or StatusStopping.\n\tStatus Status\n\n\t// Num is the number of the nodes in this cluster resource.\n\tNum int64\n\n\t// APIVersion is the version of the Kubernetes master and kubelets running\n\t// in this cluster. Allowed value is 0.4.2, or leave blank to\n\t// pick up the latest stable release.\n\tAPIVersion string\n\n\t// Endpoint is the IP address of this cluster's Kubernetes master.\n\t// The endpoint can be accessed at https://username:password@endpoint/.\n\t// See Username and Password fields for the username and password information.\n\tEndpoint string\n\n\t// Username is the username to use when accessing the Kubernetes master endpoint.\n\tUsername string\n\n\t// Password is the password to use when accessing the Kubernetes master endpoint.\n\tPassword string\n\n\t// ContainerIPv4CIDR is the IP addresses of the container pods in\n\t// this cluster, in CIDR notation (e.g. 1.2.3.4/29).\n\tContainerIPv4CIDR string\n\n\t// ServicesIPv4CIDR is the IP addresses of the Kubernetes services in this\n\t// cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are\n\t// always in the 10.0.0.0/16 range.\n\tServicesIPv4CIDR string\n\n\t// MachineType is a Google Compute Engine machine type (e.g. 
n1-standard-1).\n\t// If none set, the default type is used while creating a new cluster.\n\tMachineType string\n\n\t// SourceImage is the fully-specified name of a Google Compute Engine image.\n\t// For example: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/backports-debian-7-wheezy-vYYYYMMDD.\n\tSourceImage string\n\n\t// Created is the creation time of this cluster.\n\tCreated time.Time\n}\n\nfunc resourceFromRaw(c *raw.Cluster) *Resource {\n\tif c == nil {\n\t\treturn nil\n\t}\n\tr := &Resource{\n\t\tName:              c.Name,\n\t\tDescription:       c.Description,\n\t\tZone:              c.Zone,\n\t\tStatus:            Status(c.Status),\n\t\tNum:               c.NumNodes,\n\t\tAPIVersion:        c.ClusterApiVersion,\n\t\tEndpoint:          c.Endpoint,\n\t\tUsername:          c.MasterAuth.User,\n\t\tPassword:          c.MasterAuth.Password,\n\t\tContainerIPv4CIDR: c.ContainerIpv4Cidr,\n\t\tServicesIPv4CIDR:  c.ServicesIpv4Cidr,\n\t\tMachineType:       c.NodeConfig.MachineType,\n\t\tSourceImage:       c.NodeConfig.SourceImage,\n\t}\n\tr.Created, _ = time.Parse(time.RFC3339, c.CreationTimestamp)\n\treturn r\n}\n\nfunc resourcesFromRaw(c []*raw.Cluster) []*Resource {\n\tr := make([]*Resource, len(c))\n\tfor i, val := range c {\n\t\tr[i] = resourceFromRaw(val)\n\t}\n\treturn r\n}\n\n// Op represents a Google Container Engine API operation.\ntype Op struct {\n\t// Name is the name of the operation.\n\tName string\n\n\t// Zone is the Google Compute Engine zone.\n\tZone string\n\n\t// TargetURL is the URL of the cluster resource\n\t// that this operation is associated with.\n\tTargetURL string\n\n\t// Type is the operation type. It could be either be TypeCreate or TypeDelete.\n\tType Type\n\n\t// Status is the current status of this operation. 
It could be either\n\t// OpDone or OpPending.\n\tStatus Status\n}\n\nfunc opFromRaw(o *raw.Operation) *Op {\n\tif o == nil {\n\t\treturn nil\n\t}\n\treturn &Op{\n\t\tName:      o.Name,\n\t\tZone:      o.Zone,\n\t\tTargetURL: o.Target,\n\t\tType:      Type(o.OperationType),\n\t\tStatus:    Status(o.Status),\n\t}\n}\n\nfunc opsFromRaw(o []*raw.Operation) []*Op {\n\tops := make([]*Op, len(o))\n\tfor i, val := range o {\n\t\tops[i] = opFromRaw(val)\n\t}\n\treturn ops\n}\n\n// Clusters returns a list of cluster resources from the specified zone.\n// If no zone is specified, it returns all clusters under the user project.\nfunc Clusters(ctx context.Context, zone string) ([]*Resource, error) {\n\ts := rawService(ctx)\n\tif zone == \"\" {\n\t\tresp, err := s.Projects.Clusters.List(internal.ProjID(ctx)).Do()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn resourcesFromRaw(resp.Clusters), nil\n\t}\n\tresp, err := s.Projects.Zones.Clusters.List(internal.ProjID(ctx), zone).Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resourcesFromRaw(resp.Clusters), nil\n}\n\n// Cluster returns metadata about the specified cluster.\nfunc Cluster(ctx context.Context, zone, name string) (*Resource, error) {\n\ts := rawService(ctx)\n\tresp, err := s.Projects.Zones.Clusters.Get(internal.ProjID(ctx), zone, name).Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resourceFromRaw(resp), nil\n}\n\n// CreateCluster creates a new cluster with the provided metadata\n// in the specified zone.\nfunc CreateCluster(ctx context.Context, zone string, resource *Resource) (*Resource, error) {\n\tpanic(\"not implemented\")\n}\n\n// DeleteCluster deletes a cluster.\nfunc DeleteCluster(ctx context.Context, zone, name string) error {\n\ts := rawService(ctx)\n\t_, err := s.Projects.Zones.Clusters.Delete(internal.ProjID(ctx), zone, name).Do()\n\treturn err\n}\n\n// Operations returns a list of operations from the specified zone.\n// If no zone is specified, it looks up for all of 
the operations\n// that are running under the user's project.\nfunc Operations(ctx context.Context, zone string) ([]*Op, error) {\n\ts := rawService(ctx)\n\tif zone == \"\" {\n\t\tresp, err := s.Projects.Operations.List(internal.ProjID(ctx)).Do()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn opsFromRaw(resp.Operations), nil\n\t}\n\tresp, err := s.Projects.Zones.Operations.List(internal.ProjID(ctx), zone).Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn opsFromRaw(resp.Operations), nil\n}\n\n// Operation returns an operation.\nfunc Operation(ctx context.Context, zone, name string) (*Op, error) {\n\ts := rawService(ctx)\n\tresp, err := s.Projects.Zones.Operations.Get(internal.ProjID(ctx), zone, name).Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.ErrorMessage != \"\" {\n\t\treturn nil, errors.New(resp.ErrorMessage)\n\t}\n\treturn opFromRaw(resp), nil\n}\n\nfunc rawService(ctx context.Context) *raw.Service {\n\treturn internal.Service(ctx, \"container\", func(hc *http.Client) interface{} {\n\t\tsvc, _ := raw.New(hc)\n\t\treturn svc\n\t}).(*raw.Service)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/datastore/datastore.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package datastore contains a Google Cloud Datastore client.\n//\n// This package is experimental and may make backwards-incompatible changes.\npackage datastore\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"reflect\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/cloud/internal\"\n\tpb \"google.golang.org/cloud/internal/datastore\"\n)\n\n// ContextKey represents a context key specific to the datastore\ntype ContextKey string\n\nconst (\n\t// ScopeDatastore grants permissions to view and/or manage datastore entities\n\tScopeDatastore = \"https://www.googleapis.com/auth/datastore\"\n\n\t// ScopeUserEmail grants permission to view the user's email address.\n\t// It is required to access the datastore\n\tScopeUserEmail = \"https://www.googleapis.com/auth/userinfo.email\"\n)\n\nvar (\n\t// ErrInvalidEntityType is returned when functions like Get or Next are\n\t// passed a dst or src argument of invalid type.\n\tErrInvalidEntityType = errors.New(\"datastore: invalid entity type\")\n\t// ErrInvalidKey is returned when an invalid key is presented.\n\tErrInvalidKey = errors.New(\"datastore: invalid key\")\n\t// ErrNoSuchEntity is returned when no entity was found for a given key.\n\tErrNoSuchEntity = errors.New(\"datastore: no such 
entity\")\n)\n\ntype multiArgType int\n\nconst (\n\tmultiArgTypeInvalid multiArgType = iota\n\tmultiArgTypePropertyLoadSaver\n\tmultiArgTypeStruct\n\tmultiArgTypeStructPtr\n\tmultiArgTypeInterface\n)\n\n// nsKey is the type of the context.Context key to store the datastore\n// namespace.\ntype nsKey struct{}\n\n// WithNamespace returns a new context that limits the scope its parent\n// context with a Datastore namespace.\nfunc WithNamespace(parent context.Context, namespace string) context.Context {\n\treturn context.WithValue(parent, nsKey{}, namespace)\n}\n\n// ctxNamespace returns the active namespace for a context.\n// It defaults to \"\" if no namespace was specified.\nfunc ctxNamespace(ctx context.Context) string {\n\tv, _ := ctx.Value(nsKey{}).(string)\n\treturn v\n}\n\n// ErrFieldMismatch is returned when a field is to be loaded into a different\n// type than the one it was stored from, or when a field is missing or\n// unexported in the destination struct.\n// StructType is the type of the struct pointed to by the destination argument\n// passed to Get or to Iterator.Next.\ntype ErrFieldMismatch struct {\n\tStructType reflect.Type\n\tFieldName  string\n\tReason     string\n}\n\n// errHTTP is returned when responds is a non-200 HTTP response.\ntype errHTTP struct {\n\tStatusCode int\n\tBody       string\n\terr        error\n}\n\nfunc (e *errHTTP) Error() string {\n\tif e.err == nil {\n\t\treturn fmt.Sprintf(\"error during call, http status code: %v %s\", e.StatusCode, e.Body)\n\t}\n\treturn e.err.Error()\n}\n\nfunc (e *ErrFieldMismatch) Error() string {\n\treturn fmt.Sprintf(\"datastore: cannot load field %q into a %q: %s\",\n\t\te.FieldName, e.StructType, e.Reason)\n}\n\n// baseUrl gets the base url active for the datastore service\n// defaults to \"https://www.googleapis.com/datastore/v1beta2/datasets/\" if none was specified\nfunc baseUrl(ctx context.Context) string {\n\tv := ctx.Value(ContextKey(\"base_url\"))\n\tif v == nil {\n\t\treturn 
\"https://www.googleapis.com/datastore/v1beta2/datasets/\"\n\t} else {\n\t\treturn v.(string)\n\t}\n}\n\nfunc call(ctx context.Context, method string, req proto.Message, resp proto.Message) error {\n\tpayload, err := proto.Marshal(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\turl := baseUrl(ctx) + internal.ProjID(ctx) + \"/\" + method\n\tr, err := internal.HTTPClient(ctx).Post(url, \"application/x-protobuf\", bytes.NewReader(payload))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Body.Close()\n\tall, err := ioutil.ReadAll(r.Body)\n\tif r.StatusCode != http.StatusOK {\n\t\te := &errHTTP{\n\t\t\tStatusCode: r.StatusCode,\n\t\t\terr:        err,\n\t\t}\n\t\tif err == nil {\n\t\t\te.Body = string(all)\n\t\t}\n\t\treturn e\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = proto.Unmarshal(all, resp); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc keyToProto(k *Key) *pb.Key {\n\tif k == nil {\n\t\treturn nil\n\t}\n\n\t// TODO(jbd): Eliminate unrequired allocations.\n\tpath := []*pb.Key_PathElement(nil)\n\tfor {\n\t\tel := &pb.Key_PathElement{\n\t\t\tKind: proto.String(k.kind),\n\t\t}\n\t\tif k.id != 0 {\n\t\t\tel.Id = proto.Int64(k.id)\n\t\t}\n\t\tif k.name != \"\" {\n\t\t\tel.Name = proto.String(k.name)\n\t\t}\n\t\tpath = append([]*pb.Key_PathElement{el}, path...)\n\t\tif k.parent == nil {\n\t\t\tbreak\n\t\t}\n\t\tk = k.parent\n\t}\n\tkey := &pb.Key{\n\t\tPathElement: path,\n\t}\n\tif k.namespace != \"\" {\n\t\tkey.PartitionId = &pb.PartitionId{\n\t\t\tNamespace: proto.String(k.namespace),\n\t\t}\n\t}\n\treturn key\n}\n\nfunc protoToKey(p *pb.Key) *Key {\n\tkeys := make([]*Key, len(p.GetPathElement()))\n\tfor i, el := range p.GetPathElement() {\n\t\tkeys[i] = &Key{\n\t\t\tnamespace: p.GetPartitionId().GetNamespace(),\n\t\t\tkind:      el.GetKind(),\n\t\t\tid:        el.GetId(),\n\t\t\tname:      el.GetName(),\n\t\t}\n\t}\n\tfor i := 0; i < len(keys)-1; i++ {\n\t\tkeys[i+1].parent = keys[i]\n\t}\n\treturn keys[len(keys)-1]\n}\n\n// 
multiKeyToProto is a batch version of keyToProto.\nfunc multiKeyToProto(keys []*Key) []*pb.Key {\n\tret := make([]*pb.Key, len(keys))\n\tfor i, k := range keys {\n\t\tret[i] = keyToProto(k)\n\t}\n\treturn ret\n}\n\n// multiKeyToProto is a batch version of keyToProto.\nfunc multiProtoToKey(keys []*pb.Key) []*Key {\n\tret := make([]*Key, len(keys))\n\tfor i, k := range keys {\n\t\tret[i] = protoToKey(k)\n\t}\n\treturn ret\n}\n\n// multiValid is a batch version of Key.valid. It returns an error, not a\n// []bool.\nfunc multiValid(key []*Key) error {\n\tinvalid := false\n\tfor _, k := range key {\n\t\tif !k.valid() {\n\t\t\tinvalid = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !invalid {\n\t\treturn nil\n\t}\n\terr := make(MultiError, len(key))\n\tfor i, k := range key {\n\t\tif !k.valid() {\n\t\t\terr[i] = ErrInvalidKey\n\t\t}\n\t}\n\treturn err\n}\n\n// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct\n// type S, for some interface type I, or some non-interface non-pointer type P\n// such that P or *P implements PropertyLoadSaver.\n//\n// It returns what category the slice's elements are, and the reflect.Type\n// that represents S, I or P.\n//\n// As a special case, PropertyList is an invalid type for v.\nfunc checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {\n\tif v.Kind() != reflect.Slice {\n\t\treturn multiArgTypeInvalid, nil\n\t}\n\tif v.Type() == typeOfPropertyList {\n\t\treturn multiArgTypeInvalid, nil\n\t}\n\telemType = v.Type().Elem()\n\tif reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {\n\t\treturn multiArgTypePropertyLoadSaver, elemType\n\t}\n\tswitch elemType.Kind() {\n\tcase reflect.Struct:\n\t\treturn multiArgTypeStruct, elemType\n\tcase reflect.Interface:\n\t\treturn multiArgTypeInterface, elemType\n\tcase reflect.Ptr:\n\t\telemType = elemType.Elem()\n\t\tif elemType.Kind() == reflect.Struct {\n\t\t\treturn multiArgTypeStructPtr, elemType\n\t\t}\n\t}\n\treturn multiArgTypeInvalid, nil\n}\n\n// Get 
loads the entity stored for key into dst, which must be a struct pointer\n// or implement PropertyLoadSaver. If there is no such entity for the key, Get\n// returns ErrNoSuchEntity.\n//\n// The values of dst's unmatched struct fields are not modified, and matching\n// slice-typed fields are not reset before appending to them. In particular, it\n// is recommended to pass a pointer to a zero valued struct on each Get call.\n//\n// ErrFieldMismatch is returned when a field is to be loaded into a different\n// type than the one it was stored from, or when a field is missing or\n// unexported in the destination struct. ErrFieldMismatch is only returned if\n// dst is a struct pointer.\nfunc Get(ctx context.Context, key *Key, dst interface{}) error {\n\terr := get(ctx, []*Key{key}, []interface{}{dst}, nil)\n\tif me, ok := err.(MultiError); ok {\n\t\treturn me[0]\n\t}\n\treturn err\n}\n\n// GetMulti is a batch version of Get.\n//\n// dst must be a []S, []*S, []I or []P, for some struct type S, some interface\n// type I, or some non-interface non-pointer type P such that P or *P\n// implements PropertyLoadSaver. If an []I, each element must be a valid dst\n// for Get: it must be a struct pointer or implement PropertyLoadSaver.\n//\n// As a special case, PropertyList is an invalid type for dst, even though a\n// PropertyList is a slice of structs. 
It is treated as invalid to avoid being\n// mistakenly passed when []PropertyList was intended.\nfunc GetMulti(ctx context.Context, keys []*Key, dst interface{}) error {\n\treturn get(ctx, keys, dst, nil)\n}\n\nfunc get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error {\n\tv := reflect.ValueOf(dst)\n\tmultiArgType, _ := checkMultiArg(v)\n\n\t// Sanity checks\n\tif multiArgType == multiArgTypeInvalid {\n\t\treturn errors.New(\"datastore: dst has invalid type\")\n\t}\n\tif len(keys) != v.Len() {\n\t\treturn errors.New(\"datastore: keys and dst slices have different length\")\n\t}\n\tif len(keys) == 0 {\n\t\treturn nil\n\t}\n\n\t// Go through keys, validate them, serialize then, and create a dict mapping them to their index\n\tmultiErr, any := make(MultiError, len(keys)), false\n\tkeyMap := make(map[string]int)\n\tpbKeys := make([]*pb.Key, len(keys))\n\tfor i, k := range keys {\n\t\tif !k.valid() {\n\t\t\tmultiErr[i] = ErrInvalidKey\n\t\t\tany = true\n\t\t} else {\n\t\t\tkeyMap[k.String()] = i\n\t\t\tpbKeys[i] = keyToProto(k)\n\t\t}\n\t}\n\tif any {\n\t\treturn multiErr\n\t}\n\treq := &pb.LookupRequest{\n\t\tKey:         pbKeys,\n\t\tReadOptions: opts,\n\t}\n\tresp := &pb.LookupResponse{}\n\tif err := call(ctx, \"lookup\", req, resp); err != nil {\n\t\treturn err\n\t}\n\tif len(resp.Deferred) > 0 {\n\t\t// TODO(jbd): Assess whether we should retry the deferred keys.\n\t\treturn errors.New(\"datastore: some entities temporarily unavailable\")\n\t}\n\tif len(keys) != len(resp.Found)+len(resp.Missing) {\n\t\treturn errors.New(\"datastore: internal error: server returned the wrong number of entities\")\n\t}\n\tfor _, e := range resp.Found {\n\t\tk := protoToKey(e.Entity.Key)\n\t\tindex := keyMap[k.String()]\n\t\telem := v.Index(index)\n\t\tif multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {\n\t\t\telem = elem.Addr()\n\t\t}\n\t\terr := loadEntity(elem.Interface(), e.Entity)\n\t\tif err != nil 
{\n\t\t\tmultiErr[index] = err\n\t\t\tany = true\n\t\t}\n\t}\n\tfor _, e := range resp.Missing {\n\t\tk := protoToKey(e.Entity.Key)\n\t\tmultiErr[keyMap[k.String()]] = ErrNoSuchEntity\n\t\tany = true\n\t}\n\tif any {\n\t\treturn multiErr\n\t}\n\treturn nil\n}\n\n// Put saves the entity src into the datastore with key k. src must be a struct\n// pointer or implement PropertyLoadSaver; if a struct pointer then any\n// unexported fields of that struct will be skipped. If k is an incomplete key,\n// the returned key will be a unique key generated by the datastore.\nfunc Put(ctx context.Context, key *Key, src interface{}) (*Key, error) {\n\tk, err := PutMulti(ctx, []*Key{key}, []interface{}{src})\n\tif err != nil {\n\t\tif me, ok := err.(MultiError); ok {\n\t\t\treturn nil, me[0]\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn k[0], nil\n}\n\n// PutMulti is a batch version of Put.\n//\n// src must satisfy the same conditions as the dst argument to GetMulti.\nfunc PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) {\n\tmutation, err := putMutation(keys, src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Make the request.\n\treq := &pb.CommitRequest{\n\t\tMutation: mutation,\n\t\tMode:     pb.CommitRequest_NON_TRANSACTIONAL.Enum(),\n\t}\n\tresp := &pb.CommitResponse{}\n\tif err := call(ctx, \"commit\", req, resp); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Copy any newly minted keys into the returned keys.\n\tnewKeys := make(map[int]int) // Map of index in returned slice to index in response.\n\tret := make([]*Key, len(keys))\n\tvar idx int\n\tfor i, key := range keys {\n\t\tif key.Incomplete() {\n\t\t\t// This key will be in the mutation result.\n\t\t\tnewKeys[i] = idx\n\t\t\tidx++\n\t\t} else {\n\t\t\tret[i] = key\n\t\t}\n\t}\n\tif len(newKeys) != len(resp.MutationResult.InsertAutoIdKey) {\n\t\treturn nil, errors.New(\"datastore: internal error: server returned the wrong number of keys\")\n\t}\n\tfor retI, respI := range newKeys 
{\n\t\tret[retI] = protoToKey(resp.MutationResult.InsertAutoIdKey[respI])\n\t}\n\treturn ret, nil\n}\n\nfunc putMutation(keys []*Key, src interface{}) (*pb.Mutation, error) {\n\tv := reflect.ValueOf(src)\n\tmultiArgType, _ := checkMultiArg(v)\n\tif multiArgType == multiArgTypeInvalid {\n\t\treturn nil, errors.New(\"datastore: src has invalid type\")\n\t}\n\tif len(keys) != v.Len() {\n\t\treturn nil, errors.New(\"datastore: key and src slices have different length\")\n\t}\n\tif len(keys) == 0 {\n\t\treturn nil, nil\n\t}\n\tif err := multiValid(keys); err != nil {\n\t\treturn nil, err\n\t}\n\tvar upsert, insert []*pb.Entity\n\tfor i, k := range keys {\n\t\tval := reflect.ValueOf(src).Index(i)\n\t\t// If src is an interface slice []interface{}{ent1, ent2}\n\t\tif val.Kind() == reflect.Interface && val.Elem().Kind() == reflect.Slice {\n\t\t\tval = val.Elem()\n\t\t}\n\t\t// If src is a slice of ptrs []*T{ent1, ent2}\n\t\tif val.Kind() == reflect.Ptr && val.Elem().Kind() == reflect.Slice {\n\t\t\tval = val.Elem()\n\t\t}\n\t\tp, err := saveEntity(k, val.Interface())\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"datastore: Error while saving %v: %v\", k.String(), err)\n\t\t}\n\t\tif k.Incomplete() {\n\t\t\tinsert = append(insert, p)\n\t\t} else {\n\t\t\tupsert = append(upsert, p)\n\t\t}\n\t}\n\n\treturn &pb.Mutation{\n\t\tInsertAutoId: insert,\n\t\tUpsert:       upsert,\n\t}, nil\n}\n\n// Delete deletes the entity for the given key.\nfunc Delete(ctx context.Context, key *Key) error {\n\terr := DeleteMulti(ctx, []*Key{key})\n\tif me, ok := err.(MultiError); ok {\n\t\treturn me[0]\n\t}\n\treturn err\n}\n\n// DeleteMulti is a batch version of Delete.\nfunc DeleteMulti(ctx context.Context, keys []*Key) error {\n\tmutation, err := deleteMutation(keys)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq := &pb.CommitRequest{\n\t\tMutation: mutation,\n\t\tMode:     pb.CommitRequest_NON_TRANSACTIONAL.Enum(),\n\t}\n\tresp := &pb.CommitResponse{}\n\treturn call(ctx, \"commit\", 
req, resp)\n}\n\nfunc deleteMutation(keys []*Key) (*pb.Mutation, error) {\n\tprotoKeys := make([]*pb.Key, len(keys))\n\tfor i, k := range keys {\n\t\tif k.Incomplete() {\n\t\t\treturn nil, fmt.Errorf(\"datastore: can't delete the incomplete key: %v\", k)\n\t\t}\n\t\tprotoKeys[i] = keyToProto(k)\n\t}\n\n\treturn &pb.Mutation{\n\t\tDelete: protoKeys,\n\t}, nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/datastore/errors.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// This file provides error functions for common API failure modes.\n\npackage datastore\n\nimport (\n\t\"fmt\"\n)\n\n// MultiError is returned by batch operations when there are errors with\n// particular elements. Errors will be in a one-to-one correspondence with\n// the input elements; successful elements will have a nil entry.\ntype MultiError []error\n\nfunc (m MultiError) Error() string {\n\ts, n := \"\", 0\n\tfor _, e := range m {\n\t\tif e != nil {\n\t\t\tif n == 0 {\n\t\t\t\ts = e.Error()\n\t\t\t}\n\t\t\tn++\n\t\t}\n\t}\n\tswitch n {\n\tcase 0:\n\t\treturn \"(0 errors)\"\n\tcase 1:\n\t\treturn s\n\tcase 2:\n\t\treturn s + \" (and 1 other error)\"\n\t}\n\treturn fmt.Sprintf(\"%s (and %d other errors)\", s, n-1)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/datastore/key.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage datastore\n\nimport (\n\t\"bytes\"\n\t\"encoding/base64\"\n\t\"encoding/gob\"\n\t\"errors\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\tpb \"google.golang.org/cloud/internal/datastore\"\n)\n\n// Key represents the datastore key for a stored entity, and is immutable.\ntype Key struct {\n\tkind   string\n\tid     int64\n\tname   string\n\tparent *Key\n\n\tnamespace string\n}\n\nfunc (k *Key) Kind() string {\n\treturn k.kind\n}\n\nfunc (k *Key) ID() int64 {\n\treturn k.id\n}\n\nfunc (k *Key) Name() string {\n\treturn k.name\n}\n\nfunc (k *Key) Parent() *Key {\n\treturn k.parent\n}\n\nfunc (k *Key) SetParent(v *Key) {\n\tif v.Incomplete() {\n\t\tpanic(\"can't set an incomplete key as parent\")\n\t}\n\tk.parent = v\n}\n\nfunc (k *Key) Namespace() string {\n\treturn k.namespace\n}\n\n// Complete returns whether the key does not refer to a stored entity.\nfunc (k *Key) Incomplete() bool {\n\treturn k.name == \"\" && k.id == 0\n}\n\n// valid returns whether the key is valid.\nfunc (k *Key) valid() bool {\n\tif k == nil {\n\t\treturn false\n\t}\n\tfor ; k != nil; k = k.parent {\n\t\tif k.kind == \"\" {\n\t\t\treturn false\n\t\t}\n\t\tif k.name != \"\" && k.id != 0 {\n\t\t\treturn false\n\t\t}\n\t\tif k.parent != nil {\n\t\t\tif k.parent.Incomplete() {\n\t\t\t\treturn 
false\n\t\t\t}\n\t\t\tif k.parent.namespace != k.namespace {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (k *Key) Equal(o *Key) bool {\n\tfor {\n\t\tif k == nil || o == nil {\n\t\t\treturn k == o // if either is nil, both must be nil\n\t\t}\n\t\tif k.namespace != o.namespace || k.name != o.name || k.id != o.id || k.kind != o.kind {\n\t\t\treturn false\n\t\t}\n\t\tif k.parent == nil && o.parent == nil {\n\t\t\treturn true\n\t\t}\n\t\tk = k.parent\n\t\to = o.parent\n\t}\n}\n\n// marshal marshals the key's string representation to the buffer.\nfunc (k *Key) marshal(b *bytes.Buffer) {\n\tif k.parent != nil {\n\t\tk.parent.marshal(b)\n\t}\n\tb.WriteByte('/')\n\tb.WriteString(k.kind)\n\tb.WriteByte(',')\n\tif k.name != \"\" {\n\t\tb.WriteString(k.name)\n\t} else {\n\t\tb.WriteString(strconv.FormatInt(k.id, 10))\n\t}\n}\n\n// String returns a string representation of the key.\nfunc (k *Key) String() string {\n\tif k == nil {\n\t\treturn \"\"\n\t}\n\tb := bytes.NewBuffer(make([]byte, 0, 512))\n\tk.marshal(b)\n\treturn b.String()\n}\n\n// Note: Fields not renamed compared to appengine gobKey struct\n// This ensures gobs created by appengine can be read here, and vice/versa\ntype gobKey struct {\n\tKind      string\n\tStringID  string\n\tIntID     int64\n\tParent    *gobKey\n\tAppID     string\n\tNamespace string\n}\n\nfunc keyToGobKey(k *Key) *gobKey {\n\tif k == nil {\n\t\treturn nil\n\t}\n\treturn &gobKey{\n\t\tKind:      k.kind,\n\t\tStringID:  k.name,\n\t\tIntID:     k.id,\n\t\tParent:    keyToGobKey(k.parent),\n\t\tNamespace: k.namespace,\n\t}\n}\n\nfunc gobKeyToKey(gk *gobKey) *Key {\n\tif gk == nil {\n\t\treturn nil\n\t}\n\treturn &Key{\n\t\tkind:      gk.Kind,\n\t\tname:      gk.StringID,\n\t\tid:        gk.IntID,\n\t\tparent:    gobKeyToKey(gk.Parent),\n\t\tnamespace: gk.Namespace,\n\t}\n}\n\nfunc (k *Key) GobEncode() ([]byte, error) {\n\tbuf := new(bytes.Buffer)\n\tif err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil 
{\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}\n\nfunc (k *Key) GobDecode(buf []byte) error {\n\tgk := new(gobKey)\n\tif err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {\n\t\treturn err\n\t}\n\t*k = *gobKeyToKey(gk)\n\treturn nil\n}\n\nfunc (k *Key) MarshalJSON() ([]byte, error) {\n\treturn []byte(`\"` + k.Encode() + `\"`), nil\n}\n\nfunc (k *Key) UnmarshalJSON(buf []byte) error {\n\tif len(buf) < 2 || buf[0] != '\"' || buf[len(buf)-1] != '\"' {\n\t\treturn errors.New(\"datastore: bad JSON key\")\n\t}\n\tk2, err := DecodeKey(string(buf[1 : len(buf)-1]))\n\tif err != nil {\n\t\treturn err\n\t}\n\t*k = *k2\n\treturn nil\n}\n\n// Encode returns an opaque representation of the key\n// suitable for use in HTML and URLs.\n// This is compatible with the Python and Java runtimes.\nfunc (k *Key) Encode() string {\n\tpKey := keyToProto(k)\n\n\tb, err := proto.Marshal(pKey)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Trailing padding is stripped.\n\treturn strings.TrimRight(base64.URLEncoding.EncodeToString(b), \"=\")\n}\n\n// DecodeKey decodes a key from the opaque representation returned by Encode.\nfunc DecodeKey(encoded string) (*Key, error) {\n\t// Re-add padding.\n\tif m := len(encoded) % 4; m != 0 {\n\t\tencoded += strings.Repeat(\"=\", 4-m)\n\t}\n\n\tb, err := base64.URLEncoding.DecodeString(encoded)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpKey := new(pb.Key)\n\tif err := proto.Unmarshal(b, pKey); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn protoToKey(pKey), nil\n}\n\n// NewIncompleteKey creates a new incomplete key.\n// kind cannot be empty.\nfunc NewIncompleteKey(ctx context.Context, kind string, parent *Key) *Key {\n\treturn NewKey(ctx, kind, \"\", 0, parent)\n}\n\n// NewKey creates a new key.\n// kind cannot be empty.\n// Either one or both of stringID and intID must be zero. 
If both are zero,\n// the key returned is incomplete.\n// parent must either be a complete key or nil.\nfunc NewKey(ctx context.Context, kind, name string, id int64, parent *Key) *Key {\n\treturn &Key{\n\t\tkind:      kind,\n\t\tname:      name,\n\t\tid:        id,\n\t\tparent:    parent,\n\t\tnamespace: ctxNamespace(ctx),\n\t}\n}\n\n// AllocateIDs accepts a slice of incomplete keys and returns a\n// slice of complete keys that are guaranteed to be valid in the datastore\nfunc AllocateIDs(ctx context.Context, keys []*Key) ([]*Key, error) {\n\tif keys == nil {\n\t\treturn nil, nil\n\t}\n\n\treq := &pb.AllocateIdsRequest{Key: multiKeyToProto(keys)}\n\tres := &pb.AllocateIdsResponse{}\n\tif err := call(ctx, \"allocateIds\", req, res); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn multiProtoToKey(res.Key), nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/datastore/load.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage datastore\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n\n\tpb \"google.golang.org/cloud/internal/datastore\"\n)\n\nvar (\n\ttypeOfByteSlice = reflect.TypeOf([]byte(nil))\n\ttypeOfTime      = reflect.TypeOf(time.Time{})\n)\n\n// typeMismatchReason returns a string explaining why the property p could not\n// be stored in an entity field of type v.Type().\nfunc typeMismatchReason(p Property, v reflect.Value) string {\n\tentityType := \"empty\"\n\tswitch p.Value.(type) {\n\tcase int64:\n\t\tentityType = \"int\"\n\tcase bool:\n\t\tentityType = \"bool\"\n\tcase string:\n\t\tentityType = \"string\"\n\tcase float64:\n\t\tentityType = \"float\"\n\tcase *Key:\n\t\tentityType = \"*datastore.Key\"\n\tcase time.Time:\n\t\tentityType = \"time.Time\"\n\tcase []byte:\n\t\tentityType = \"[]byte\"\n\t}\n\n\treturn fmt.Sprintf(\"type mismatch: %s versus %v\", entityType, v.Type())\n}\n\ntype propertyLoader struct {\n\t// m holds the number of times a substruct field like \"Foo.Bar.Baz\" has\n\t// been seen so far. 
The map is constructed lazily.\n\tm map[string]int\n}\n\nfunc (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, prev map[string]struct{}) string {\n\tvar sliceOk bool\n\tvar v reflect.Value\n\t// Traverse a struct's struct-typed fields.\n\tfor name := p.Name; ; {\n\t\tdecoder, ok := codec.byName[name]\n\t\tif !ok {\n\t\t\treturn \"no such struct field\"\n\t\t}\n\t\tv = structValue.Field(decoder.index)\n\t\tif !v.IsValid() {\n\t\t\treturn \"no such struct field\"\n\t\t}\n\t\tif !v.CanSet() {\n\t\t\treturn \"cannot set struct field\"\n\t\t}\n\n\t\tif decoder.substructCodec == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif v.Kind() == reflect.Slice {\n\t\t\tif l.m == nil {\n\t\t\t\tl.m = make(map[string]int)\n\t\t\t}\n\t\t\tindex := l.m[p.Name]\n\t\t\tl.m[p.Name] = index + 1\n\t\t\tfor v.Len() <= index {\n\t\t\t\tv.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))\n\t\t\t}\n\t\t\tstructValue = v.Index(index)\n\t\t\tsliceOk = true\n\t\t} else {\n\t\t\tstructValue = v\n\t\t}\n\t\t// Strip the \"I.\" from \"I.X\".\n\t\tname = name[len(codec.byIndex[decoder.index].name):]\n\t\tcodec = decoder.substructCodec\n\t}\n\n\tvar slice reflect.Value\n\tif v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {\n\t\tslice = v\n\t\tv = reflect.New(v.Type().Elem()).Elem()\n\t} else if _, ok := prev[p.Name]; ok && !sliceOk {\n\t\t// Zero the field back out that was set previously, turns out its a slice and we don't know what to do with it\n\t\tv.Set(reflect.Zero(v.Type()))\n\n\t\treturn \"multiple-valued property requires a slice field type\"\n\t}\n\n\tprev[p.Name] = struct{}{}\n\n\tpValue := p.Value\n\tswitch v.Kind() {\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\tx, ok := pValue.(int64)\n\t\tif !ok && pValue != nil {\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\t\tif v.OverflowInt(x) {\n\t\t\treturn fmt.Sprintf(\"value %v overflows struct field of type %v\", x, 
v.Type())\n\t\t}\n\t\tv.SetInt(x)\n\tcase reflect.Bool:\n\t\tx, ok := pValue.(bool)\n\t\tif !ok && pValue != nil {\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\t\tv.SetBool(x)\n\tcase reflect.String:\n\t\tx, ok := pValue.(string)\n\t\tif !ok && pValue != nil {\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\t\tv.SetString(x)\n\tcase reflect.Float32, reflect.Float64:\n\t\tx, ok := pValue.(float64)\n\t\tif !ok && pValue != nil {\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\t\tif v.OverflowFloat(x) {\n\t\t\treturn fmt.Sprintf(\"value %v overflows struct field of type %v\", x, v.Type())\n\t\t}\n\t\tv.SetFloat(x)\n\tcase reflect.Ptr:\n\t\tx, ok := pValue.(*Key)\n\t\tif !ok && pValue != nil {\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\t\tif _, ok := v.Interface().(*Key); !ok {\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\t\tv.Set(reflect.ValueOf(x))\n\tcase reflect.Struct:\n\t\tswitch v.Type() {\n\t\tcase typeOfTime:\n\t\t\tx, ok := pValue.(time.Time)\n\t\t\tif !ok && pValue != nil {\n\t\t\t\treturn typeMismatchReason(p, v)\n\t\t\t}\n\t\t\tv.Set(reflect.ValueOf(x))\n\t\tdefault:\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\tcase reflect.Slice:\n\t\tx, ok := pValue.([]byte)\n\t\tif !ok && pValue != nil {\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn typeMismatchReason(p, v)\n\t\t}\n\t\tv.SetBytes(x)\n\tdefault:\n\t\treturn typeMismatchReason(p, v)\n\t}\n\tif slice.IsValid() {\n\t\tslice.Set(reflect.Append(slice, v))\n\t}\n\treturn \"\"\n}\n\n// loadEntity loads an EntityProto into PropertyLoadSaver or struct pointer.\nfunc loadEntity(dst interface{}, src *pb.Entity) (err error) {\n\tprops := protoToProperties(src)\n\tif e, ok := dst.(PropertyLoadSaver); ok {\n\t\treturn e.Load(props)\n\t}\n\treturn LoadStruct(dst, props)\n}\n\nfunc (s structPLS) Load(props []Property) error {\n\tvar fieldName, reason string\n\tvar l propertyLoader\n\n\tprev := make(map[string]struct{})\n\tfor _, p := range 
props {\n\t\tif errStr := l.load(s.codec, s.v, p, prev); errStr != \"\" {\n\t\t\t// We don't return early, as we try to load as many properties as possible.\n\t\t\t// It is valid to load an entity into a struct that cannot fully represent it.\n\t\t\t// That case returns an error, but the caller is free to ignore it.\n\t\t\tfieldName, reason = p.Name, errStr\n\t\t}\n\t}\n\tif reason != \"\" {\n\t\treturn &ErrFieldMismatch{\n\t\t\tStructType: s.v.Type(),\n\t\t\tFieldName:  fieldName,\n\t\t\tReason:     reason,\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc protoToProperties(src *pb.Entity) []Property {\n\tprops := src.Property\n\tout := make([]Property, 0, len(props))\n\tfor {\n\t\tvar (\n\t\t\tx       *pb.Property\n\t\t\tnoIndex bool\n\t\t)\n\t\tif len(props) > 0 {\n\t\t\tx, props = props[0], props[1:]\n\t\t\tnoIndex = !x.GetValue().GetIndexed()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\n\t\tif x.Value.ListValue == nil {\n\t\t\tout = append(out, Property{\n\t\t\t\tName:     x.GetName(),\n\t\t\t\tValue:    propValue(x.Value),\n\t\t\t\tNoIndex:  noIndex,\n\t\t\t\tMultiple: false,\n\t\t\t})\n\t\t} else {\n\t\t\tfor _, v := range x.Value.ListValue {\n\t\t\t\tout = append(out, Property{\n\t\t\t\t\tName:     x.GetName(),\n\t\t\t\t\tValue:    propValue(v),\n\t\t\t\t\tNoIndex:  noIndex,\n\t\t\t\t\tMultiple: true,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\treturn out\n}\n\n// propValue returns a Go value that combines the raw PropertyValue with a\n// meaning. 
For example, an Int64Value with GD_WHEN becomes a time.Time.\nfunc propValue(v *pb.Value) interface{} {\n\t//TODO(PSG-Luna): Support EntityValue\n\t//TODO(PSG-Luna): GeoPoint seems gone from the v1 proto, reimplement it once it's readded\n\tswitch {\n\tcase v.IntegerValue != nil:\n\t\treturn *v.IntegerValue\n\tcase v.TimestampMicrosecondsValue != nil:\n\t\treturn fromUnixMicro(*v.TimestampMicrosecondsValue)\n\tcase v.BooleanValue != nil:\n\t\treturn *v.BooleanValue\n\tcase v.StringValue != nil:\n\t\treturn *v.StringValue\n\tcase v.BlobValue != nil:\n\t\treturn []byte(v.BlobValue)\n\tcase v.BlobKeyValue != nil:\n\t\treturn *v.BlobKeyValue\n\tcase v.DoubleValue != nil:\n\t\treturn *v.DoubleValue\n\tcase v.KeyValue != nil:\n\t\treturn protoToKey(v.KeyValue)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/datastore/prop.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage datastore\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n\t\"unicode\"\n)\n\n// Entities with more than this many indexed properties will not be saved.\nconst maxIndexedProperties = 5000\n\n// []byte fields more than 1 megabyte long will not be loaded or saved.\nconst maxBlobLen = 1 << 20\n\n// Property is a name/value pair plus some metadata. A datastore entity's\n// contents are loaded and saved as a sequence of Properties. An entity can\n// have multiple Properties with the same name, provided that p.Multiple is\n// true on all of that entity's Properties with that name.\ntype Property struct {\n\t// Name is the property name.\n\tName string\n\t// Value is the property value. The valid types are:\n\t//\t- int64\n\t//\t- bool\n\t//\t- string\n\t//\t- float64\n\t//\t- *Key\n\t//\t- time.Time\n\t//\t- []byte (up to 1 megabyte in length)\n\t// This set is smaller than the set of valid struct field types that the\n\t// datastore can load and save. A Property Value cannot be a slice (apart\n\t// from []byte); use multiple Properties instead. Also, a Value's type\n\t// must be explicitly on the list above; it is not sufficient for the\n\t// underlying type to be on that list. For example, a Value of \"type\n\t// myInt64 int64\" is invalid. Smaller-width integers and floats are also\n\t// invalid. 
Again, this is more restrictive than the set of valid struct\n\t// field types.\n\t//\n\t// A Value will have an opaque type when loading entities from an index,\n\t// such as via a projection query. Load entities into a struct instead\n\t// of a PropertyLoadSaver when using a projection query.\n\t//\n\t// A Value may also be the nil interface value; this is equivalent to\n\t// Python's None but not directly representable by a Go struct. Loading\n\t// a nil-valued property into a struct will set that field to the zero\n\t// value.\n\tValue interface{}\n\t// NoIndex is whether the datastore cannot index this property.\n\t// If NoIndex is set to false, []byte values are limited to 1500 bytes and\n\t// string values are limited to 1500 bytes.\n\tNoIndex bool\n\t// Multiple is whether the entity can have multiple properties with\n\t// the same name. Even if a particular instance only has one property with\n\t// a certain name, Multiple should be true if a struct would best represent\n\t// it as a field of type []T instead of type T.\n\tMultiple bool\n}\n\n// PropertyLoadSaver can be converted from and to a slice of Properties.\ntype PropertyLoadSaver interface {\n\tLoad([]Property) error\n\tSave() ([]Property, error)\n}\n\n// PropertyList converts a []Property to implement PropertyLoadSaver.\ntype PropertyList []Property\n\nvar (\n\ttypeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()\n\ttypeOfPropertyList      = reflect.TypeOf(PropertyList(nil))\n)\n\n// Load loads all of the provided properties into l.\n// It does not first reset *l to an empty slice.\nfunc (l *PropertyList) Load(p []Property) error {\n\t*l = append(*l, p...)\n\treturn nil\n}\n\n// Save saves all of l's properties as a slice of Properties.\nfunc (l *PropertyList) Save() ([]Property, error) {\n\treturn *l, nil\n}\n\n// validPropertyName returns whether name consists of one or more valid Go\n// identifiers joined by \".\".\nfunc validPropertyName(name string) bool {\n\tif name == 
\"\" {\n\t\treturn false\n\t}\n\tfor _, s := range strings.Split(name, \".\") {\n\t\tif s == \"\" {\n\t\t\treturn false\n\t\t}\n\t\tfirst := true\n\t\tfor _, c := range s {\n\t\t\tif first {\n\t\t\t\tfirst = false\n\t\t\t\tif c != '_' && !unicode.IsLetter(c) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\n// structTag is the parsed `datastore:\"name,options\"` tag of a struct field.\n// If a field has no tag, or the tag has an empty name, then the structTag's\n// name is just the field name. A \"-\" name means that the datastore ignores\n// that field.\ntype structTag struct {\n\tname    string\n\tnoIndex bool\n}\n\n// structCodec describes how to convert a struct to and from a sequence of\n// properties.\ntype structCodec struct {\n\t// byIndex gives the structTag for the i'th field.\n\tbyIndex []structTag\n\t// byName gives the field codec for the structTag with the given name.\n\tbyName map[string]fieldCodec\n\t// hasSlice is whether a struct or any of its nested or embedded structs\n\t// has a slice-typed field (other than []byte).\n\thasSlice bool\n\t// complete is whether the structCodec is complete. 
An incomplete\n\t// structCodec may be encountered when walking a recursive struct.\n\tcomplete bool\n}\n\n// fieldCodec is a struct field's index and, if that struct field's type is\n// itself a struct, that substruct's structCodec.\ntype fieldCodec struct {\n\tindex          int\n\tsubstructCodec *structCodec\n}\n\n// structCodecs collects the structCodecs that have already been calculated.\nvar (\n\tstructCodecsMutex sync.Mutex\n\tstructCodecs      = make(map[reflect.Type]*structCodec)\n)\n\n// getStructCodec returns the structCodec for the given struct type.\nfunc getStructCodec(t reflect.Type) (*structCodec, error) {\n\tstructCodecsMutex.Lock()\n\tdefer structCodecsMutex.Unlock()\n\treturn getStructCodecLocked(t)\n}\n\n// getStructCodecLocked implements getStructCodec. The structCodecsMutex must\n// be held when calling this function.\nfunc getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) {\n\tc, ok := structCodecs[t]\n\tif ok {\n\t\treturn c, nil\n\t}\n\tc = &structCodec{\n\t\tbyIndex: make([]structTag, t.NumField()),\n\t\tbyName:  make(map[string]fieldCodec),\n\t}\n\n\t// Add c to the structCodecs map before we are sure it is good. 
If t is\n\t// a recursive type, it needs to find the incomplete entry for itself in\n\t// the map.\n\tstructCodecs[t] = c\n\tdefer func() {\n\t\tif retErr != nil {\n\t\t\tdelete(structCodecs, t)\n\t\t}\n\t}()\n\n\tfor i := range c.byIndex {\n\t\tf := t.Field(i)\n\t\tname, opts := f.Tag.Get(\"datastore\"), \"\"\n\t\tif i := strings.Index(name, \",\"); i != -1 {\n\t\t\tname, opts = name[:i], name[i+1:]\n\t\t}\n\t\tif name == \"\" {\n\t\t\tif !f.Anonymous {\n\t\t\t\tname = f.Name\n\t\t\t}\n\t\t} else if name == \"-\" {\n\t\t\tc.byIndex[i] = structTag{name: name}\n\t\t\tcontinue\n\t\t} else if !validPropertyName(name) {\n\t\t\treturn nil, fmt.Errorf(\"datastore: struct tag has invalid property name: %q\", name)\n\t\t}\n\n\t\tsubstructType, fIsSlice := reflect.Type(nil), false\n\t\tswitch f.Type.Kind() {\n\t\tcase reflect.Struct:\n\t\t\tsubstructType = f.Type\n\t\tcase reflect.Slice:\n\t\t\tif f.Type.Elem().Kind() == reflect.Struct {\n\t\t\t\tsubstructType = f.Type.Elem()\n\t\t\t}\n\t\t\tfIsSlice = f.Type != typeOfByteSlice\n\t\t\tc.hasSlice = c.hasSlice || fIsSlice\n\t\t}\n\n\t\tif substructType != nil && substructType != typeOfTime {\n\t\t\tif name != \"\" {\n\t\t\t\tname = name + \".\"\n\t\t\t}\n\t\t\tsub, err := getStructCodecLocked(substructType)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif !sub.complete {\n\t\t\t\treturn nil, fmt.Errorf(\"datastore: recursive struct: field %q\", f.Name)\n\t\t\t}\n\t\t\tif fIsSlice && sub.hasSlice {\n\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\"datastore: flattening nested structs leads to a slice of slices: field %q\", f.Name)\n\t\t\t}\n\t\t\tc.hasSlice = c.hasSlice || sub.hasSlice\n\t\t\tfor relName := range sub.byName {\n\t\t\t\tabsName := name + relName\n\t\t\t\tif _, ok := c.byName[absName]; ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\"datastore: struct tag has repeated property name: %q\", absName)\n\t\t\t\t}\n\t\t\t\tc.byName[absName] = fieldCodec{index: i, substructCodec: sub}\n\t\t\t}\n\t\t} else 
{\n\t\t\tif _, ok := c.byName[name]; ok {\n\t\t\t\treturn nil, fmt.Errorf(\"datastore: struct tag has repeated property name: %q\", name)\n\t\t\t}\n\t\t\tc.byName[name] = fieldCodec{index: i}\n\t\t}\n\n\t\tc.byIndex[i] = structTag{\n\t\t\tname:    name,\n\t\t\tnoIndex: opts == \"noindex\",\n\t\t}\n\t}\n\tc.complete = true\n\treturn c, nil\n}\n\n// structPLS adapts a struct to be a PropertyLoadSaver.\ntype structPLS struct {\n\tv     reflect.Value\n\tcodec *structCodec\n}\n\n// newStructPLS returns a PropertyLoadSaver for the struct pointer p.\nfunc newStructPLS(p interface{}) (PropertyLoadSaver, error) {\n\tv := reflect.ValueOf(p)\n\tif v.Kind() != reflect.Ptr || v.IsNil() || v.Elem().Kind() != reflect.Struct {\n\t\treturn nil, ErrInvalidEntityType\n\t}\n\tv = v.Elem()\n\tcodec, err := getStructCodec(v.Type())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn structPLS{v, codec}, nil\n}\n\n// LoadStruct loads the properties from p to dst.\n// dst must be a struct pointer.\nfunc LoadStruct(dst interface{}, p []Property) error {\n\tx, err := newStructPLS(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn x.Load(p)\n}\n\n// SaveStruct returns the properties from src as a slice of Properties.\n// src must be a struct pointer.\nfunc SaveStruct(src interface{}) ([]Property, error) {\n\tx, err := newStructPLS(src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn x.Save()\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/datastore/query.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage datastore\n\nimport (\n\t\"encoding/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\tpb \"google.golang.org/cloud/internal/datastore\"\n)\n\ntype operator int\n\nconst (\n\tlessThan operator = iota\n\tlessEq\n\tequal\n\tgreaterEq\n\tgreaterThan\n\n\tkeyFieldName = \"__key__\"\n)\n\nvar operatorToProto = map[operator]*pb.PropertyFilter_Operator{\n\tlessThan:    pb.PropertyFilter_LESS_THAN.Enum(),\n\tlessEq:      pb.PropertyFilter_LESS_THAN_OR_EQUAL.Enum(),\n\tequal:       pb.PropertyFilter_EQUAL.Enum(),\n\tgreaterEq:   pb.PropertyFilter_GREATER_THAN_OR_EQUAL.Enum(),\n\tgreaterThan: pb.PropertyFilter_GREATER_THAN.Enum(),\n}\n\n// filter is a conditional filter on query results.\ntype filter struct {\n\tFieldName string\n\tOp        operator\n\tValue     interface{}\n}\n\ntype sortDirection int\n\nconst (\n\tascending sortDirection = iota\n\tdescending\n)\n\nvar sortDirectionToProto = map[sortDirection]*pb.PropertyOrder_Direction{\n\tascending:  pb.PropertyOrder_ASCENDING.Enum(),\n\tdescending: pb.PropertyOrder_DESCENDING.Enum(),\n}\n\n// order is a sort order on query results.\ntype order struct {\n\tFieldName string\n\tDirection sortDirection\n}\n\n// NewQuery creates a new Query for a specific entity 
kind.\n//\n// An empty kind means to return all entities, including entities created and\n// managed by other App Engine features, and is called a kindless query.\n// Kindless queries cannot include filters or sort orders on property values.\nfunc NewQuery(kind string) *Query {\n\treturn &Query{\n\t\tkind:  kind,\n\t\tlimit: -1,\n\t}\n}\n\n// Query represents a datastore query.\ntype Query struct {\n\tkind       string\n\tancestor   *Key\n\tfilter     []filter\n\torder      []order\n\tprojection []string\n\n\tdistinct bool\n\tkeysOnly bool\n\teventual bool\n\tlimit    int32\n\toffset   int32\n\tstart    []byte\n\tend      []byte\n\ttrans    *Transaction\n\n\terr error\n}\n\nfunc (q *Query) clone() *Query {\n\tx := *q\n\t// Copy the contents of the slice-typed fields to a new backing store.\n\tif len(q.filter) > 0 {\n\t\tx.filter = make([]filter, len(q.filter))\n\t\tcopy(x.filter, q.filter)\n\t}\n\tif len(q.order) > 0 {\n\t\tx.order = make([]order, len(q.order))\n\t\tcopy(x.order, q.order)\n\t}\n\treturn &x\n}\n\n// Ancestor returns a derivative query with an ancestor filter.\n// The ancestor should not be nil.\nfunc (q *Query) Ancestor(ancestor *Key) *Query {\n\tq = q.clone()\n\tif ancestor == nil {\n\t\tq.err = errors.New(\"datastore: nil query ancestor\")\n\t\treturn q\n\t}\n\tq.ancestor = ancestor\n\treturn q\n}\n\n// EventualConsistency returns a derivative query that returns eventually\n// consistent results.\n// It only has an effect on ancestor queries.\nfunc (q *Query) EventualConsistency() *Query {\n\tq = q.clone()\n\tq.eventual = true\n\treturn q\n}\n\n// Transaction returns a derivative query that is associated with the given\n// transaction.\n//\n// All reads performed as part of the transaction will come from a single\n// consistent snapshot. 
Furthermore, if the transaction is set to a\n// serializable isolation level, another transaction cannot concurrently modify\n// the data that is read or modified by this transaction.\nfunc (q *Query) Transaction(t *Transaction) *Query {\n\tq = q.clone()\n\tq.trans = t\n\treturn q\n}\n\n// Filter returns a derivative query with a field-based filter.\n// The filterStr argument must be a field name followed by optional space,\n// followed by an operator, one of \">\", \"<\", \">=\", \"<=\", or \"=\".\n// Fields are compared against the provided value using the operator.\n// Multiple filters are AND'ed together.\n// Field names which contain spaces, quote marks, or operator characters\n// should be passed as quoted Go string literals as returned by strconv.Quote\n// or the fmt package's %q verb.\nfunc (q *Query) Filter(filterStr string, value interface{}) *Query {\n\tq = q.clone()\n\tfilterStr = strings.TrimSpace(filterStr)\n\tif filterStr == \"\" {\n\t\tq.err = fmt.Errorf(\"datastore: invalid filter %q\", filterStr)\n\t\treturn q\n\t}\n\tf := filter{\n\t\tFieldName: strings.TrimRight(filterStr, \" ><=!\"),\n\t\tValue:     value,\n\t}\n\tswitch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {\n\tcase \"<=\":\n\t\tf.Op = lessEq\n\tcase \">=\":\n\t\tf.Op = greaterEq\n\tcase \"<\":\n\t\tf.Op = lessThan\n\tcase \">\":\n\t\tf.Op = greaterThan\n\tcase \"=\":\n\t\tf.Op = equal\n\tdefault:\n\t\tq.err = fmt.Errorf(\"datastore: invalid operator %q in filter %q\", op, filterStr)\n\t\treturn q\n\t}\n\tvar err error\n\tf.FieldName, err = unquote(f.FieldName)\n\tif err != nil {\n\t\tq.err = fmt.Errorf(\"datastore: invalid syntax for quoted field name %q\", f.FieldName)\n\t\treturn q\n\t}\n\tq.filter = append(q.filter, f)\n\treturn q\n}\n\n// Order returns a derivative query with a field-based sort order. Orders are\n// applied in the order they are added. 
The default order is ascending; to sort\n// in descending order prefix the fieldName with a minus sign (-).\n// Field names which contain spaces, quote marks, or the minus sign\n// should be passed as quoted Go string literals as returned by strconv.Quote\n// or the fmt package's %q verb.\nfunc (q *Query) Order(fieldName string) *Query {\n\tq = q.clone()\n\tfieldName, dir := strings.TrimSpace(fieldName), ascending\n\tif strings.HasPrefix(fieldName, \"-\") {\n\t\tfieldName, dir = strings.TrimSpace(fieldName[1:]), descending\n\t} else if strings.HasPrefix(fieldName, \"+\") {\n\t\tq.err = fmt.Errorf(\"datastore: invalid order: %q\", fieldName)\n\t\treturn q\n\t}\n\tfieldName, err := unquote(fieldName)\n\tif err != nil {\n\t\tq.err = fmt.Errorf(\"datastore: invalid syntax for quoted field name %q\", fieldName)\n\t\treturn q\n\t}\n\tif fieldName == \"\" {\n\t\tq.err = errors.New(\"datastore: empty order\")\n\t\treturn q\n\t}\n\tq.order = append(q.order, order{\n\t\tDirection: dir,\n\t\tFieldName: fieldName,\n\t})\n\treturn q\n}\n\n// unquote optionally interprets s as a double-quoted or backquoted Go\n// string literal if it begins with the relevant character.\nfunc unquote(s string) (string, error) {\n\tif s == \"\" || (s[0] != '`' && s[0] != '\"') {\n\t\treturn s, nil\n\t}\n\treturn strconv.Unquote(s)\n}\n\n// Project returns a derivative query that yields only the given fields. It\n// cannot be used with KeysOnly.\nfunc (q *Query) Project(fieldNames ...string) *Query {\n\tq = q.clone()\n\tq.projection = append([]string(nil), fieldNames...)\n\treturn q\n}\n\n// Distinct returns a derivative query that yields de-duplicated entities with\n// respect to the set of projected fields. It is only used for projection\n// queries.\nfunc (q *Query) Distinct() *Query {\n\tq = q.clone()\n\tq.distinct = true\n\treturn q\n}\n\n// KeysOnly returns a derivative query that yields only keys, not keys and\n// entities. 
It cannot be used with projection queries.\nfunc (q *Query) KeysOnly() *Query {\n\tq = q.clone()\n\tq.keysOnly = true\n\treturn q\n}\n\n// Limit returns a derivative query that has a limit on the number of results\n// returned. A negative value means unlimited.\nfunc (q *Query) Limit(limit int) *Query {\n\tq = q.clone()\n\tif limit < math.MinInt32 || limit > math.MaxInt32 {\n\t\tq.err = errors.New(\"datastore: query limit overflow\")\n\t\treturn q\n\t}\n\tq.limit = int32(limit)\n\treturn q\n}\n\n// Offset returns a derivative query that has an offset of how many keys to\n// skip over before returning results. A negative value is invalid.\nfunc (q *Query) Offset(offset int) *Query {\n\tq = q.clone()\n\tif offset < 0 {\n\t\tq.err = errors.New(\"datastore: negative query offset\")\n\t\treturn q\n\t}\n\tif offset > math.MaxInt32 {\n\t\tq.err = errors.New(\"datastore: query offset overflow\")\n\t\treturn q\n\t}\n\tq.offset = int32(offset)\n\treturn q\n}\n\n// Start returns a derivative query with the given start point.\nfunc (q *Query) Start(c Cursor) *Query {\n\tq = q.clone()\n\tif c.cc == nil {\n\t\tq.err = errors.New(\"datastore: invalid cursor\")\n\t\treturn q\n\t}\n\tq.start = c.cc\n\treturn q\n}\n\n// End returns a derivative query with the given end point.\nfunc (q *Query) End(c Cursor) *Query {\n\tq = q.clone()\n\tif c.cc == nil {\n\t\tq.err = errors.New(\"datastore: invalid cursor\")\n\t\treturn q\n\t}\n\tq.end = c.cc\n\treturn q\n}\n\n// toProto converts the query to a protocol buffer.\nfunc (q *Query) toProto(req *pb.RunQueryRequest) error {\n\tdst := pb.Query{}\n\tif len(q.projection) != 0 && q.keysOnly {\n\t\treturn errors.New(\"datastore: query cannot both project and be keys-only\")\n\t}\n\tdst.Reset()\n\tif q.kind != \"\" {\n\t\tdst.Kind = []*pb.KindExpression{&pb.KindExpression{Name: proto.String(q.kind)}}\n\t}\n\tif q.projection != nil {\n\t\tfor _, propertyName := range q.projection {\n\t\t\tdst.Projection = append(dst.Projection, 
&pb.PropertyExpression{Property: &pb.PropertyReference{Name: proto.String(propertyName)}})\n\t\t}\n\n\t\tif q.distinct {\n\t\t\tfor _, propertyName := range q.projection {\n\t\t\t\tdst.GroupBy = append(dst.GroupBy, &pb.PropertyReference{Name: proto.String(propertyName)})\n\t\t\t}\n\t\t}\n\t}\n\tif q.keysOnly {\n\t\tdst.Projection = []*pb.PropertyExpression{&pb.PropertyExpression{Property: &pb.PropertyReference{Name: proto.String(keyFieldName)}}}\n\t}\n\n\tvar filters []*pb.Filter\n\tfor _, qf := range q.filter {\n\t\tif qf.FieldName == \"\" {\n\t\t\treturn errors.New(\"datastore: empty query filter field name\")\n\t\t}\n\t\tv, errStr := interfaceToProto(reflect.ValueOf(qf.Value).Interface())\n\t\tif errStr != \"\" {\n\t\t\treturn errors.New(\"datastore: bad query filter value type: \" + errStr)\n\t\t}\n\t\txf := &pb.PropertyFilter{\n\t\t\tOperator: operatorToProto[qf.Op],\n\t\t\tProperty: &pb.PropertyReference{Name: proto.String(qf.FieldName)},\n\t\t\tValue:    v,\n\t\t}\n\t\tif xf.Operator == nil {\n\t\t\treturn errors.New(\"datastore: unknown query filter operator\")\n\t\t}\n\t\tfilters = append(filters, &pb.Filter{PropertyFilter: xf})\n\t}\n\n\tif q.ancestor != nil {\n\t\tfilters = append(filters, &pb.Filter{\n\t\t\tPropertyFilter: &pb.PropertyFilter{\n\t\t\t\tProperty: &pb.PropertyReference{Name: proto.String(\"__key__\")},\n\t\t\t\tOperator: pb.PropertyFilter_HAS_ANCESTOR.Enum(),\n\t\t\t\tValue:    &pb.Value{KeyValue: keyToProto(q.ancestor)},\n\t\t\t}})\n\t}\n\n\tif len(filters) == 1 {\n\t\tdst.Filter = filters[0]\n\t} else if len(filters) > 1 {\n\t\tdst.Filter = &pb.Filter{CompositeFilter: &pb.CompositeFilter{\n\t\t\tOperator: pb.CompositeFilter_AND.Enum(),\n\t\t\tFilter:   filters,\n\t\t}}\n\t}\n\n\tfor _, qo := range q.order {\n\t\tif qo.FieldName == \"\" {\n\t\t\treturn errors.New(\"datastore: empty query order field name\")\n\t\t}\n\t\txo := &pb.PropertyOrder{\n\t\t\tProperty:  &pb.PropertyReference{Name: proto.String(qo.FieldName)},\n\t\t\tDirection: 
sortDirectionToProto[qo.Direction],\n\t\t}\n\t\tif xo.Direction == nil {\n\t\t\treturn errors.New(\"datastore: unknown query order direction\")\n\t\t}\n\t\tdst.Order = append(dst.Order, xo)\n\t}\n\tif q.limit >= 0 {\n\t\tdst.Limit = proto.Int32(q.limit)\n\t}\n\tif q.offset != 0 {\n\t\tdst.Offset = proto.Int32(q.offset)\n\t}\n\tdst.StartCursor = q.start\n\tdst.EndCursor = q.end\n\n\tif t := q.trans; t != nil {\n\t\tif t.id == nil {\n\t\t\treturn errExpiredTransaction\n\t\t}\n\t\treq.ReadOptions = &pb.ReadOptions{Transaction: t.id}\n\t}\n\n\treq.Query = &dst\n\treturn nil\n}\n\n// Count returns the number of results for the query.\nfunc (q *Query) Count(ctx context.Context) (int, error) {\n\t// Check that the query is well-formed.\n\tif q.err != nil {\n\t\treturn 0, q.err\n\t}\n\n\t// Run a copy of the query, with keysOnly true (if we're not a projection,\n\t// since the two are incompatible).\n\tnewQ := q.clone()\n\tnewQ.keysOnly = len(newQ.projection) == 0\n\treq := &pb.RunQueryRequest{}\n\n\tif ns := ctxNamespace(ctx); ns != \"\" {\n\t\treq.PartitionId = &pb.PartitionId{\n\t\t\tNamespace: proto.String(ns),\n\t\t}\n\t}\n\tif err := newQ.toProto(req); err != nil {\n\t\treturn 0, err\n\t}\n\tres := &pb.RunQueryResponse{}\n\tif err := call(ctx, \"runQuery\", req, res); err != nil {\n\t\treturn 0, err\n\t}\n\tvar n int\n\tb := res.Batch\n\tfor {\n\t\tn += len(b.GetEntityResult())\n\t\tif b.GetMoreResults() != pb.QueryResultBatch_NOT_FINISHED {\n\t\t\tbreak\n\t\t}\n\t\tvar err error\n\t\t// TODO(jbd): Support count queries that have a limit and an offset.\n\t\tif err = callNext(ctx, req, res, 0, 0); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn int(n), nil\n}\n\nfunc callNext(ctx context.Context, req *pb.RunQueryRequest, res *pb.RunQueryResponse, offset, limit int32) error {\n\tif res.GetBatch().EndCursor == nil {\n\t\treturn errors.New(\"datastore: internal error: server did not return a cursor\")\n\t}\n\treq.Query.StartCursor = 
res.GetBatch().GetEndCursor()\n\tif limit >= 0 {\n\t\treq.Query.Limit = proto.Int32(limit)\n\t}\n\tif offset != 0 {\n\t\treq.Query.Offset = proto.Int32(offset)\n\t}\n\tres.Reset()\n\treturn call(ctx, \"runQuery\", req, res)\n}\n\n// GetAll runs the query in the given context and returns all keys that match\n// that query, as well as appending the values to dst.\n//\n// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-\n// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.\n//\n// As a special case, *PropertyList is an invalid type for dst, even though a\n// PropertyList is a slice of structs. It is treated as invalid to avoid being\n// mistakenly passed when *[]PropertyList was intended.\n//\n// The keys returned by GetAll will be in a 1-1 correspondence with the entities\n// added to dst.\n//\n// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.\nfunc (q *Query) GetAll(ctx context.Context, dst interface{}) ([]*Key, error) {\n\tvar (\n\t\tdv               reflect.Value\n\t\tmat              multiArgType\n\t\telemType         reflect.Type\n\t\terrFieldMismatch error\n\t)\n\tif !q.keysOnly {\n\t\tdv = reflect.ValueOf(dst)\n\t\tif dv.Kind() != reflect.Ptr || dv.IsNil() {\n\t\t\treturn nil, ErrInvalidEntityType\n\t\t}\n\t\tdv = dv.Elem()\n\t\tmat, elemType = checkMultiArg(dv)\n\t\tif mat == multiArgTypeInvalid || mat == multiArgTypeInterface {\n\t\t\treturn nil, ErrInvalidEntityType\n\t\t}\n\t}\n\n\tvar keys []*Key\n\tfor t := q.Run(ctx); ; {\n\t\tk, e, err := t.next()\n\t\tif err == Done {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn keys, err\n\t\t}\n\t\tif !q.keysOnly {\n\t\t\tev := reflect.New(elemType)\n\t\t\tif elemType.Kind() == reflect.Map {\n\t\t\t\t// This is a special case. 
The zero values of a map type are\n\t\t\t\t// not immediately useful; they have to be make'd.\n\t\t\t\t//\n\t\t\t\t// Funcs and channels are similar, in that a zero value is not useful,\n\t\t\t\t// but even a freshly make'd channel isn't useful: there's no fixed\n\t\t\t\t// channel buffer size that is always going to be large enough, and\n\t\t\t\t// there's no goroutine to drain the other end. Theoretically, these\n\t\t\t\t// types could be supported, for example by sniffing for a constructor\n\t\t\t\t// method or requiring prior registration, but for now it's not a\n\t\t\t\t// frequent enough concern to be worth it. Programmers can work around\n\t\t\t\t// it by explicitly using Iterator.Next instead of the Query.GetAll\n\t\t\t\t// convenience method.\n\t\t\t\tx := reflect.MakeMap(elemType)\n\t\t\t\tev.Elem().Set(x)\n\t\t\t}\n\t\t\tif err = loadEntity(ev.Interface(), e); err != nil {\n\t\t\t\tif _, ok := err.(*ErrFieldMismatch); ok {\n\t\t\t\t\t// We continue loading entities even in the face of field mismatch errors.\n\t\t\t\t\t// If we encounter any other error, that other error is returned. 
Otherwise,\n\t\t\t\t\t// an ErrFieldMismatch is returned.\n\t\t\t\t\terrFieldMismatch = err\n\t\t\t\t} else {\n\t\t\t\t\treturn keys, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif mat != multiArgTypeStructPtr {\n\t\t\t\tev = ev.Elem()\n\t\t\t}\n\t\t\tdv.Set(reflect.Append(dv, ev))\n\t\t}\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys, errFieldMismatch\n}\n\n// Run runs the query in the given context.\nfunc (q *Query) Run(ctx context.Context) *Iterator {\n\tif q.err != nil {\n\t\treturn &Iterator{err: q.err}\n\t}\n\tt := &Iterator{\n\t\tctx:    ctx,\n\t\tlimit:  q.limit,\n\t\tq:      q,\n\t\tprevCC: q.start,\n\t}\n\tt.req.Reset()\n\tif ns := ctxNamespace(ctx); ns != \"\" {\n\t\tt.req.PartitionId = &pb.PartitionId{\n\t\t\tNamespace: proto.String(ns),\n\t\t}\n\t}\n\tif err := q.toProto(&t.req); err != nil {\n\t\tt.err = err\n\t\treturn t\n\t}\n\tif err := call(ctx, \"runQuery\", &t.req, &t.res); err != nil {\n\t\tt.err = err\n\t\treturn t\n\t}\n\tb := t.res.GetBatch()\n\toffset := q.offset - b.GetSkippedResults()\n\tfor offset > 0 && b.GetMoreResults() == pb.QueryResultBatch_NOT_FINISHED {\n\t\tt.prevCC = b.GetEndCursor()\n\t\tvar err error\n\t\tif err = callNext(t.ctx, &t.req, &t.res, offset, t.limit); err != nil {\n\t\t\tt.err = err\n\t\t\tbreak\n\t\t}\n\t\tskip := b.GetSkippedResults()\n\t\tif skip < 0 {\n\t\t\tt.err = errors.New(\"datastore: internal error: negative number of skipped_results\")\n\t\t\tbreak\n\t\t}\n\t\toffset -= skip\n\t}\n\tif offset < 0 {\n\t\tt.err = errors.New(\"datastore: internal error: query offset was overshot\")\n\t}\n\treturn t\n}\n\n// Iterator is the result of running a query.\ntype Iterator struct {\n\tctx context.Context\n\terr error\n\t// req is the request we sent previously, we need to keep track of it to resend it\n\treq pb.RunQueryRequest\n\t// res is the result of the most recent RunQuery or Next API call.\n\tres pb.RunQueryResponse\n\t// i is how many elements of res.Result we have iterated over.\n\ti int\n\t// limit is the limit on the 
number of results this iterator should return.\n\t// A negative value means unlimited.\n\tlimit int32\n\t// q is the original query which yielded this iterator.\n\tq *Query\n\t// prevCC is the compiled cursor that marks the end of the previous batch\n\t// of results.\n\tprevCC []byte\n}\n\n// Done is returned when a query iteration has completed.\nvar Done = errors.New(\"datastore: query has no more results\")\n\n// Next returns the key of the next result. When there are no more results,\n// Done is returned as the error.\n//\n// If the query is not keys only and dst is non-nil, it also loads the entity\n// stored for that key into the struct pointer or PropertyLoadSaver dst, with\n// the same semantics and possible errors as for the Get function.\nfunc (t *Iterator) Next(dst interface{}) (*Key, error) {\n\tk, e, err := t.next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif dst != nil && !t.q.keysOnly {\n\t\terr = loadEntity(dst, e)\n\t}\n\treturn k, err\n}\n\nfunc (t *Iterator) next() (*Key, *pb.Entity, error) {\n\tif t.err != nil {\n\t\treturn nil, nil, t.err\n\t}\n\n\t// Issue datastore_v3/Next RPCs as necessary.\n\tb := t.res.GetBatch()\n\tfor t.i == len(b.EntityResult) {\n\t\tif b.GetMoreResults() != pb.QueryResultBatch_NOT_FINISHED {\n\t\t\tt.err = Done\n\t\t\treturn nil, nil, t.err\n\t\t}\n\t\tt.prevCC = b.GetEndCursor()\n\t\tif err := callNext(t.ctx, &t.req, &t.res, 0, t.limit); err != nil {\n\t\t\tt.err = err\n\t\t\treturn nil, nil, t.err\n\t\t}\n\t\tif b.GetSkippedResults() != 0 {\n\t\t\tt.err = errors.New(\"datastore: internal error: iterator has skipped results\")\n\t\t\treturn nil, nil, t.err\n\t\t}\n\t\tt.i = 0\n\t\tif t.limit >= 0 {\n\t\t\tt.limit -= int32(len(b.EntityResult))\n\t\t\tif t.limit < 0 {\n\t\t\t\tt.err = errors.New(\"datastore: internal error: query returned more results than the limit\")\n\t\t\t\treturn nil, nil, t.err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Extract the key from the t.i'th element of t.res.Result.\n\te := 
b.EntityResult[t.i]\n\tt.i++\n\tif e.Entity.Key == nil {\n\t\treturn nil, nil, errors.New(\"datastore: internal error: server did not return a key\")\n\t}\n\tk := protoToKey(e.Entity.Key)\n\tif k.Incomplete() {\n\t\treturn nil, nil, errors.New(\"datastore: internal error: server returned an invalid key\")\n\t}\n\treturn k, e.Entity, nil\n}\n\n// Cursor returns a cursor for the iterator's current location.\nfunc (t *Iterator) Cursor() (Cursor, error) {\n\tif t.err != nil && t.err != Done {\n\t\treturn Cursor{}, t.err\n\t}\n\t// If we are at either end of the current batch of results,\n\t// return the compiled cursor at that end.\n\tb := t.res.Batch\n\tskipped := b.GetSkippedResults()\n\tif t.i == 0 && skipped == 0 {\n\t\tif t.prevCC == nil {\n\t\t\t// A nil pointer (of type *pb.CompiledCursor) means no constraint:\n\t\t\t// passing it as the end cursor of a new query means unlimited results\n\t\t\t// (glossing over the integer limit parameter for now).\n\t\t\t// A non-nil pointer to an empty pb.CompiledCursor means the start:\n\t\t\t// passing it as the end cursor of a new query means 0 results.\n\t\t\t// If prevCC was nil, then the original query had no start cursor, but\n\t\t\t// Iterator.Cursor should return \"the start\" instead of unlimited.\n\t\t\treturn Cursor{}, nil\n\t\t}\n\t\treturn Cursor{t.prevCC}, nil\n\t}\n\tif t.i == len(b.EntityResult) {\n\t\treturn Cursor{b.EndCursor}, nil\n\t}\n\t// Otherwise, re-run the query offset to this iterator's position, starting from\n\t// the most recent compiled cursor. 
This is done on a best-effort basis, as it\n\t// is racy; if a concurrent process has added or removed entities, then the\n\t// cursor returned may be inconsistent.\n\tq := t.q.clone()\n\tq.start = t.prevCC\n\tq.offset = skipped + int32(t.i)\n\tq.limit = 0\n\tq.keysOnly = len(q.projection) == 0\n\tt1 := q.Run(t.ctx)\n\t_, _, err := t1.next()\n\tif err != Done {\n\t\tif err == nil {\n\t\t\terr = fmt.Errorf(\"datastore: internal error: zero-limit query did not have zero results\")\n\t\t}\n\t\treturn Cursor{}, err\n\t}\n\treturn Cursor{t1.res.Batch.EndCursor}, nil\n}\n\n// Cursor is an iterator's position. It can be converted to and from an opaque\n// string. A cursor can be used from different HTTP requests, but only with a\n// query with the same kind, ancestor, filter and order constraints.\ntype Cursor struct {\n\tcc []byte\n}\n\n// String returns a base-64 string representation of a cursor.\nfunc (c Cursor) String() string {\n\tif c.cc == nil {\n\t\treturn \"\"\n\t}\n\n\treturn strings.TrimRight(base64.URLEncoding.EncodeToString(c.cc), \"=\")\n}\n\n// Decode decodes a cursor from its base-64 string representation.\nfunc DecodeCursor(s string) (Cursor, error) {\n\tif s == \"\" {\n\t\treturn Cursor{}, nil\n\t}\n\tif n := len(s) % 4; n != 0 {\n\t\ts += strings.Repeat(\"=\", 4-n)\n\t}\n\tb, err := base64.URLEncoding.DecodeString(s)\n\tif err != nil {\n\t\treturn Cursor{}, err\n\t}\n\treturn Cursor{b}, nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/datastore/save.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage datastore\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com/golang/protobuf/proto\"\n\tpb \"google.golang.org/cloud/internal/datastore\"\n)\n\n// saveEntity saves an EntityProto into a PropertyLoadSaver or struct pointer.\nfunc saveEntity(key *Key, src interface{}) (*pb.Entity, error) {\n\tvar err error\n\tvar props []Property\n\tif e, ok := src.(PropertyLoadSaver); ok {\n\t\tprops, err = e.Save()\n\t} else {\n\t\tprops, err = SaveStruct(src)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn propertiesToProto(key, props)\n}\n\nfunc saveStructProperty(props *[]Property, name string, noIndex, multiple bool, v reflect.Value) error {\n\tp := Property{\n\t\tName:     name,\n\t\tNoIndex:  noIndex,\n\t\tMultiple: multiple,\n\t}\n\n\tswitch x := v.Interface().(type) {\n\tcase *Key, time.Time:\n\t\tp.Value = x\n\tdefault:\n\t\tswitch v.Kind() {\n\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\tp.Value = v.Int()\n\t\tcase reflect.Bool:\n\t\t\tp.Value = v.Bool()\n\t\tcase reflect.String:\n\t\t\tp.Value = v.String()\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\tp.Value = v.Float()\n\t\tcase reflect.Slice:\n\t\t\tif v.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\t\tp.Value = v.Bytes()\n\t\t\t}\n\t\tcase reflect.Struct:\n\t\t\tif !v.CanAddr() 
{\n\t\t\t\treturn fmt.Errorf(\"datastore: unsupported struct field: value is unaddressable\")\n\t\t\t}\n\t\t\tsub, err := newStructPLS(v.Addr().Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"datastore: unsupported struct field: %v\", err)\n\t\t\t}\n\t\t\treturn sub.(structPLS).save(props, name, noIndex, multiple)\n\t\t}\n\t}\n\tif p.Value == nil {\n\t\treturn fmt.Errorf(\"datastore: unsupported struct field type: %v\", v.Type())\n\t}\n\t*props = append(*props, p)\n\treturn nil\n}\n\nfunc (s structPLS) Save() ([]Property, error) {\n\tvar props []Property\n\tif err := s.save(&props, \"\", false, false); err != nil {\n\t\treturn nil, err\n\t}\n\treturn props, nil\n}\n\nfunc (s structPLS) save(props *[]Property, prefix string, noIndex, multiple bool) error {\n\tfor i, t := range s.codec.byIndex {\n\t\tif t.name == \"-\" {\n\t\t\tcontinue\n\t\t}\n\t\tname := t.name\n\t\tif prefix != \"\" {\n\t\t\tname = prefix + name\n\t\t}\n\t\tv := s.v.Field(i)\n\t\tif !v.IsValid() || !v.CanSet() {\n\t\t\tcontinue\n\t\t}\n\t\tnoIndex1 := noIndex || t.noIndex\n\t\t// For slice fields that aren't []byte, save each element.\n\t\tif v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\tfor j := 0; j < v.Len(); j++ {\n\t\t\t\tif err := saveStructProperty(props, name, noIndex1, true, v.Index(j)); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// Otherwise, save the field itself.\n\t\tif err := saveStructProperty(props, name, noIndex1, multiple, v); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc propertiesToProto(key *Key, props []Property) (*pb.Entity, error) {\n\te := &pb.Entity{\n\t\tKey: keyToProto(key),\n\t}\n\tindexedProps := 0\n\tprevMultiple := make(map[string]*pb.Property)\n\tfor _, p := range props {\n\t\tval, err := interfaceToProto(p.Value)\n\t\tif err != \"\" {\n\t\t\treturn nil, fmt.Errorf(\"datastore: %s for a Property with Name %q\", err, p.Name)\n\t\t}\n\t\tif !p.NoIndex 
{\n\t\t\trVal := reflect.ValueOf(p.Value)\n\t\t\tif rVal.Kind() == reflect.Slice && rVal.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\t\tindexedProps += rVal.Len()\n\t\t\t} else {\n\t\t\t\tindexedProps++\n\t\t\t}\n\t\t}\n\t\tif indexedProps > maxIndexedProperties {\n\t\t\treturn nil, errors.New(\"datastore: too many indexed properties\")\n\t\t}\n\t\tswitch v := p.Value.(type) {\n\t\tcase string:\n\t\tcase []byte:\n\t\t\tif len(v) > 1500 && !p.NoIndex {\n\t\t\t\treturn nil, fmt.Errorf(\"datastore: cannot index a Property with Name %q\", p.Name)\n\t\t\t}\n\t\t}\n\t\tval.Indexed = proto.Bool(!p.NoIndex)\n\t\tif p.Multiple {\n\t\t\tx, ok := prevMultiple[p.Name]\n\t\t\tif !ok {\n\t\t\t\tx = &pb.Property{\n\t\t\t\t\tName:  proto.String(p.Name),\n\t\t\t\t\tValue: &pb.Value{},\n\t\t\t\t}\n\t\t\t\tprevMultiple[p.Name] = x\n\t\t\t\te.Property = append(e.Property, x)\n\t\t\t}\n\t\t\tx.Value.ListValue = append(x.Value.ListValue, val)\n\t\t} else {\n\t\t\te.Property = append(e.Property, &pb.Property{\n\t\t\t\tName:  proto.String(p.Name),\n\t\t\t\tValue: val,\n\t\t\t})\n\t\t}\n\t}\n\treturn e, nil\n}\n\nfunc interfaceToProto(iv interface{}) (p *pb.Value, errStr string) {\n\tval := new(pb.Value)\n\tswitch v := iv.(type) {\n\tcase int:\n\t\tval.IntegerValue = proto.Int64(int64(v))\n\tcase int32:\n\t\tval.IntegerValue = proto.Int64(int64(v))\n\tcase int64:\n\t\tval.IntegerValue = proto.Int64(v)\n\tcase bool:\n\t\tval.BooleanValue = proto.Bool(v)\n\tcase string:\n\t\tval.StringValue = proto.String(v)\n\tcase float32:\n\t\tval.DoubleValue = proto.Float64(float64(v))\n\tcase float64:\n\t\tval.DoubleValue = proto.Float64(v)\n\tcase *Key:\n\t\tif v != nil {\n\t\t\tval.KeyValue = keyToProto(v)\n\t\t}\n\tcase time.Time:\n\t\tif v.Before(minTime) || v.After(maxTime) {\n\t\t\treturn nil, fmt.Sprintf(\"time value out of range\")\n\t\t}\n\t\tval.TimestampMicrosecondsValue = proto.Int64(toUnixMicro(v))\n\tcase []byte:\n\t\tval.BlobValue = v\n\tdefault:\n\t\tif iv != nil {\n\t\t\treturn nil, 
fmt.Sprintf(\"invalid Value type %t\", iv)\n\t\t}\n\t}\n\t// TODO(jbd): Support ListValue and EntityValue.\n\t// TODO(jbd): Support types whose underlying type is one of the types above.\n\treturn val, \"\"\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/datastore/time.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage datastore\n\nimport (\n\t\"math\"\n\t\"time\"\n)\n\nvar (\n\tminTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)\n\tmaxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)\n)\n\nfunc toUnixMicro(t time.Time) int64 {\n\t// We cannot use t.UnixNano() / 1e3 because we want to handle times more than\n\t// 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot\n\t// be represented in the numerator of a single int64 divide.\n\treturn t.Unix()*1e6 + int64(t.Nanosecond()/1e3)\n}\n\nfunc fromUnixMicro(t int64) time.Time {\n\treturn time.Unix(t/1e6, (t%1e6)*1e3)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/datastore/transaction.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage datastore\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\n\tpb \"google.golang.org/cloud/internal/datastore\"\n)\n\n// ErrConcurrentTransaction is returned when a transaction is rolled back due\n// to a conflict with a concurrent transaction.\nvar ErrConcurrentTransaction = errors.New(\"datastore: concurrent transaction\")\n\nvar errExpiredTransaction = errors.New(\"datastore: transaction expired\")\n\n// A TransactionOption configures the Transaction returned by NewTransaction.\ntype TransactionOption interface {\n\tapply(*pb.BeginTransactionRequest)\n}\n\ntype isolation struct {\n\tlevel pb.BeginTransactionRequest_IsolationLevel\n}\n\nfunc (i isolation) apply(req *pb.BeginTransactionRequest) {\n\treq.IsolationLevel = i.level.Enum()\n}\n\nvar (\n\t// Snapshot causes the transaction to enforce a snapshot isolation level.\n\tSnapshot TransactionOption = isolation{pb.BeginTransactionRequest_SNAPSHOT}\n\t// Serializable causes the transaction to enforce a serializable isolation level.\n\tSerializable TransactionOption = isolation{pb.BeginTransactionRequest_SERIALIZABLE}\n)\n\n// Transaction represents a set of datastore operations to be committed atomically.\n//\n// Operations are enqueued by calling the Put and Delete methods on Transaction\n// (or their 
Multi-equivalents).  These operations are only committed when the\n// Commit method is invoked. To ensure consistency, reads must be performed by\n// using Transaction's Get method or by using the Transaction method when\n// building a query.\n//\n// A Transaction must be committed or rolled back exactly once.\ntype Transaction struct {\n\tid       []byte\n\tctx      context.Context\n\tmutation *pb.Mutation  // The mutations to apply.\n\tpending  []*PendingKey // Incomplete keys pending transaction completion.\n}\n\n// NewTransaction starts a new transaction.\nfunc NewTransaction(ctx context.Context, opts ...TransactionOption) (*Transaction, error) {\n\treq, resp := &pb.BeginTransactionRequest{}, &pb.BeginTransactionResponse{}\n\tfor _, o := range opts {\n\t\to.apply(req)\n\t}\n\tif err := call(ctx, \"beginTransaction\", req, resp); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Transaction{\n\t\tid:       resp.Transaction,\n\t\tctx:      ctx,\n\t\tmutation: &pb.Mutation{},\n\t}, nil\n}\n\n// Commit applies the enqueued operations atomically.\nfunc (t *Transaction) Commit() (*Commit, error) {\n\tif t.id == nil {\n\t\treturn nil, errExpiredTransaction\n\t}\n\treq := &pb.CommitRequest{\n\t\tTransaction: t.id,\n\t\tMutation:    t.mutation,\n\t\tMode:        pb.CommitRequest_TRANSACTIONAL.Enum(),\n\t}\n\tt.id = nil\n\tresp := &pb.CommitResponse{}\n\tif err := call(t.ctx, \"commit\", req, resp); err != nil {\n\t\tif e, ok := err.(*errHTTP); ok && e.StatusCode == http.StatusConflict {\n\t\t\t// TODO(jbd): Make sure that we explicitly handle the case where response\n\t\t\t// has an HTTP 409 and the error message indicates that it's an concurrent\n\t\t\t// transaction error.\n\t\t\treturn nil, ErrConcurrentTransaction\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t// Copy any newly minted keys into the returned keys.\n\tif len(t.pending) != len(resp.MutationResult.InsertAutoIdKey) {\n\t\treturn nil, errors.New(\"datastore: internal error: server returned the wrong number of 
keys\")\n\t}\n\tcommit := &Commit{}\n\tfor i, p := range t.pending {\n\t\tp.key = protoToKey(resp.MutationResult.InsertAutoIdKey[i])\n\t\tp.commit = commit\n\t}\n\n\treturn commit, nil\n}\n\n// Rollback abandons a pending transaction.\nfunc (t *Transaction) Rollback() error {\n\tif t.id == nil {\n\t\treturn errExpiredTransaction\n\t}\n\tid := t.id\n\tt.id = nil\n\treturn call(t.ctx, \"rollback\", &pb.RollbackRequest{Transaction: id}, &pb.RollbackResponse{})\n}\n\n// Get is the transaction-specific version of the package function Get.\n// All reads performed during the transaction will come from a single consistent\n// snapshot. Furthermore, if the transaction is set to a serializable isolation\n// level, another transaction cannot concurrently modify the data that is read\n// or modified by this transaction.\nfunc (t *Transaction) Get(key *Key, dst interface{}) error {\n\terr := get(t.ctx, []*Key{key}, []interface{}{dst}, &pb.ReadOptions{Transaction: t.id})\n\tif me, ok := err.(MultiError); ok {\n\t\treturn me[0]\n\t}\n\treturn err\n}\n\n// GetMulti is a batch version of Get.\nfunc (t *Transaction) GetMulti(keys []*Key, dst interface{}) error {\n\tif t.id == nil {\n\t\treturn errExpiredTransaction\n\t}\n\treturn get(t.ctx, keys, dst, &pb.ReadOptions{Transaction: t.id})\n}\n\n// Put is the transaction-specific version of the package function Put.\n//\n// Put returns a PendingKey which can be resolved into a Key using the\n// return value from a successful Commit. If key is an incomplete key, the\n// returned pending key will resolve to a unique key generated by the\n// datastore.\nfunc (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) {\n\th, err := t.PutMulti([]*Key{key}, []interface{}{src})\n\tif err != nil {\n\t\tif me, ok := err.(MultiError); ok {\n\t\t\treturn nil, me[0]\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn h[0], nil\n}\n\n// PutMulti is a batch version of Put. 
One PendingKey is returned for each\n// element of src in the same order.\nfunc (t *Transaction) PutMulti(keys []*Key, src interface{}) ([]*PendingKey, error) {\n\tif t.id == nil {\n\t\treturn nil, errExpiredTransaction\n\t}\n\tmutation, err := putMutation(keys, src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tproto.Merge(t.mutation, mutation)\n\n\t// Prepare the returned handles, pre-populating where possible.\n\tret := make([]*PendingKey, len(keys))\n\tfor _, key := range keys {\n\t\th := &PendingKey{}\n\t\tif key.Incomplete() {\n\t\t\t// This key will be in the final commit result.\n\t\t\tt.pending = append(t.pending, h)\n\t\t} else {\n\t\t\th.key = key\n\t\t}\n\t\tret = append(ret, h)\n\t}\n\treturn ret, nil\n}\n\n// Delete is the transaction-specific version of the package function Delete.\n// Delete enqueues the deletion of the entity for the given key, to be\n// committed atomically upon calling Commit.\nfunc (t *Transaction) Delete(key *Key) error {\n\terr := t.DeleteMulti([]*Key{key})\n\tif me, ok := err.(MultiError); ok {\n\t\treturn me[0]\n\t}\n\treturn err\n}\n\n// DeleteMulti is a batch version of Delete.\nfunc (t *Transaction) DeleteMulti(keys []*Key) error {\n\tif t.id == nil {\n\t\treturn errExpiredTransaction\n\t}\n\tmutation, err := deleteMutation(keys)\n\tif err != nil {\n\t\treturn err\n\t}\n\tproto.Merge(t.mutation, mutation)\n\treturn nil\n}\n\n// Commit represents the result of a committed transaction.\ntype Commit struct{}\n\n// Key resolves a pending key handle into a final key.\nfunc (c *Commit) Key(p *PendingKey) *Key {\n\tif c != p.commit {\n\t\tpanic(\"PendingKey was not created by corresponding transaction\")\n\t}\n\treturn p.key\n}\n\n// PendingKey represents the key for newly-inserted entity. It can be\n// resolved into a Key by calling the Key method of Commit.\ntype PendingKey struct {\n\tkey    *Key\n\tcommit *Commit\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/examples/bigquery/concat_table/main.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// concat_table is an example client of the bigquery client library.\n// It concatenates two BigQuery tables and writes the result to another table.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2/google\"\n\t\"google.golang.org/cloud/bigquery\"\n)\n\nvar (\n\tproject = flag.String(\"project\", \"\", \"The ID of a Google Cloud Platform project\")\n\tdataset = flag.String(\"dataset\", \"\", \"The ID of a BigQuery dataset\")\n\tsrc1    = flag.String(\"src1\", \"\", \"The ID of the first BigQuery table to concatenate\")\n\tsrc2    = flag.String(\"src2\", \"\", \"The ID of the second BigQuery table to concatenate\")\n\tdest    = flag.String(\"dest\", \"\", \"The ID of the BigQuery table to write the result to\")\n\tpollint = flag.Duration(\"pollint\", 10*time.Second, \"Polling interval for checking job status\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\tflagsOk := true\n\tfor _, f := range []string{\"project\", \"dataset\", \"src1\", \"src2\", \"dest\"} {\n\t\tif flag.Lookup(f).Value.String() == \"\" {\n\t\t\tfmt.Fprintf(os.Stderr, \"Flag --%s is required\\n\", f)\n\t\t\tflagsOk = false\n\t\t}\n\t}\n\tif !flagsOk {\n\t\tos.Exit(1)\n\t}\n\tif *src1 == *src2 || *src1 == *dest || *src2 == *dest {\n\t\tlog.Fatalf(\"Different values must be supplied 
for each of --src1, --src2 and --dest\")\n\t}\n\n\thttpClient, err := google.DefaultClient(context.Background(), bigquery.Scope)\n\tif err != nil {\n\t\tlog.Fatalf(\"Creating http client: %v\", err)\n\t}\n\n\tclient, err := bigquery.NewClient(httpClient, *project)\n\tif err != nil {\n\t\tlog.Fatalf(\"Creating bigquery client: %v\", err)\n\t}\n\n\ts1 := &bigquery.Table{\n\t\tProjectID: *project,\n\t\tDatasetID: *dataset,\n\t\tTableID:   *src1,\n\t}\n\n\ts2 := &bigquery.Table{\n\t\tProjectID: *project,\n\t\tDatasetID: *dataset,\n\t\tTableID:   *src2,\n\t}\n\n\td := &bigquery.Table{\n\t\tProjectID:        *project,\n\t\tDatasetID:        *dataset,\n\t\tTableID:          *dest,\n\t\tWriteDisposition: bigquery.WriteTruncate,\n\t}\n\n\t// Concatenate data.\n\tjob, err := client.Copy(context.Background(), d, bigquery.Tables{s1, s2})\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Concatenating: %v\", err)\n\t}\n\n\tfmt.Printf(\"Job for concatenation operation: %+v\\n\", job)\n\tfmt.Printf(\"Waiting for job to complete.\\n\")\n\n\tfor range time.Tick(*pollint) {\n\t\tstatus, err := job.Status(context.Background())\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failure determining status: %v\", err)\n\t\t\tbreak\n\t\t}\n\t\tif !status.Done() {\n\t\t\tcontinue\n\t\t}\n\t\tif err := status.Err(); err == nil {\n\t\t\tfmt.Printf(\"Success\\n\")\n\t\t} else {\n\t\t\tfmt.Printf(\"Failure: %+v\\n\", err)\n\t\t}\n\t\tbreak\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/examples/bigquery/load/main.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// load is an example client of the bigquery client library.\n// It loads a file from Google Cloud Storage into a BigQuery table.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2/google\"\n\t\"google.golang.org/cloud/bigquery\"\n)\n\nvar (\n\tproject  = flag.String(\"project\", \"\", \"The ID of a Google Cloud Platform project\")\n\tdataset  = flag.String(\"dataset\", \"\", \"The ID of a BigQuery dataset\")\n\ttable    = flag.String(\"table\", \"\", \"The ID of a BigQuery table to load data into\")\n\tbucket   = flag.String(\"bucket\", \"\", \"The name of a Google Cloud Storage bucket to load data from\")\n\tobject   = flag.String(\"object\", \"\", \"The name of a Google Cloud Storage object to load data from. 
Must exist within the bucket specified by --bucket\")\n\tskiprows = flag.Int64(\"skiprows\", 0, \"The number of rows of the source data to skip when loading\")\n\tpollint  = flag.Duration(\"pollint\", 10*time.Second, \"Polling interval for checking job status\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\tflagsOk := true\n\tfor _, f := range []string{\"project\", \"dataset\", \"table\", \"bucket\", \"object\"} {\n\t\tif flag.Lookup(f).Value.String() == \"\" {\n\t\t\tfmt.Fprintf(os.Stderr, \"Flag --%s is required\\n\", f)\n\t\t\tflagsOk = false\n\t\t}\n\t}\n\tif !flagsOk {\n\t\tos.Exit(1)\n\t}\n\n\thttpClient, err := google.DefaultClient(context.Background(), bigquery.Scope)\n\tif err != nil {\n\t\tlog.Fatalf(\"Creating http client: %v\", err)\n\t}\n\n\tclient, err := bigquery.NewClient(httpClient, *project)\n\tif err != nil {\n\t\tlog.Fatalf(\"Creating bigquery client: %v\", err)\n\t}\n\n\ttable := &bigquery.Table{\n\t\tProjectID:        *project,\n\t\tDatasetID:        *dataset,\n\t\tTableID:          *table,\n\t\tWriteDisposition: bigquery.WriteTruncate,\n\t}\n\n\tgcs := client.NewGCSReference(fmt.Sprintf(\"gs://%s/%s\", *bucket, *object))\n\tgcs.SkipLeadingRows = *skiprows\n\n\t// Load data from Google Cloud Storage into a BigQuery table.\n\tjob, err := client.Copy(\n\t\tcontext.Background(), table, gcs,\n\t\tbigquery.MaxBadRecords(1),\n\t\tbigquery.AllowQuotedNewlines())\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Loading data: %v\", err)\n\t}\n\n\tfmt.Printf(\"Job for data load operation: %+v\\n\", job)\n\tfmt.Printf(\"Waiting for job to complete.\\n\")\n\n\tfor range time.Tick(*pollint) {\n\t\tstatus, err := job.Status(context.Background())\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failure determining status: %v\", err)\n\t\t\tbreak\n\t\t}\n\t\tif !status.Done() {\n\t\t\tcontinue\n\t\t}\n\t\tif err := status.Err(); err == nil {\n\t\t\tfmt.Printf(\"Success\\n\")\n\t\t} else {\n\t\t\tfmt.Printf(\"Failure: %+v\\n\", err)\n\t\t}\n\t\tbreak\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/examples/bigquery/query/main.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// query is an example client of the bigquery client library.\n// It submits a query and writes the result to a table.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2/google\"\n\t\"google.golang.org/cloud/bigquery\"\n)\n\nvar (\n\tproject = flag.String(\"project\", \"\", \"The ID of a Google Cloud Platform project\")\n\tdataset = flag.String(\"dataset\", \"\", \"The ID of a BigQuery dataset\")\n\tq       = flag.String(\"q\", \"\", \"The query string\")\n\tdest    = flag.String(\"dest\", \"\", \"The ID of the BigQuery table to write the result to.  
If unset, an ephemeral table ID will be generated.\")\n\tpollint = flag.Duration(\"pollint\", 10*time.Second, \"Polling interval for checking job status\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\tflagsOk := true\n\tfor _, f := range []string{\"project\", \"dataset\", \"q\"} {\n\t\tif flag.Lookup(f).Value.String() == \"\" {\n\t\t\tfmt.Fprintf(os.Stderr, \"Flag --%s is required\\n\", f)\n\t\t\tflagsOk = false\n\t\t}\n\t}\n\tif !flagsOk {\n\t\tos.Exit(1)\n\t}\n\n\thttpClient, err := google.DefaultClient(context.Background(), bigquery.Scope)\n\tif err != nil {\n\t\tlog.Fatalf(\"Creating http client: %v\", err)\n\t}\n\n\tclient, err := bigquery.NewClient(httpClient, *project)\n\tif err != nil {\n\t\tlog.Fatalf(\"Creating bigquery client: %v\", err)\n\t}\n\n\td := &bigquery.Table{\n\t\tWriteDisposition: bigquery.WriteTruncate,\n\t}\n\n\tif *dest != \"\" {\n\t\td.ProjectID = *project\n\t\td.DatasetID = *dataset\n\t\td.TableID = *dest\n\t}\n\n\tquery := &bigquery.Query{\n\t\tQ:                *q,\n\t\tDefaultProjectID: *project,\n\t\tDefaultDatasetID: *dataset,\n\t}\n\n\t// Query data.\n\tjob, err := client.Copy(context.Background(), d, query)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Querying: %v\", err)\n\t}\n\n\tfmt.Printf(\"Job for query operation: %+v\\n\", job)\n\tfmt.Printf(\"Waiting for job to complete.\\n\")\n\n\tfor range time.Tick(*pollint) {\n\t\tstatus, err := job.Status(context.Background())\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failure determining status: %v\", err)\n\t\t\tbreak\n\t\t}\n\t\tif !status.Done() {\n\t\t\tcontinue\n\t\t}\n\t\tif err := status.Err(); err == nil {\n\t\t\tfmt.Printf(\"Success\\n\")\n\t\t} else {\n\t\t\tfmt.Printf(\"Failure: %+v\\n\", err)\n\t\t}\n\t\tbreak\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/examples/bigquery/read/main.go",
    "content": "// Copyright 2015 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// read is an example client of the bigquery client library.\n// It reads from a table, returning the data via an Iterator.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"text/tabwriter\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2/google\"\n\t\"google.golang.org/cloud/bigquery\"\n)\n\nvar (\n\tproject = flag.String(\"project\", \"\", \"The ID of a Google Cloud Platform project\")\n\tdataset = flag.String(\"dataset\", \"\", \"The ID of a BigQuery dataset\")\n\ttable   = flag.String(\"table\", \"\", \"The ID of a BigQuery table.\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\tflagsOk := true\n\tfor _, f := range []string{\"project\", \"dataset\", \"table\"} {\n\t\tif flag.Lookup(f).Value.String() == \"\" {\n\t\t\tfmt.Fprintf(os.Stderr, \"Flag --%s is required\\n\", f)\n\t\t\tflagsOk = false\n\t\t}\n\t}\n\tif !flagsOk {\n\t\tos.Exit(1)\n\t}\n\n\thttpClient, err := google.DefaultClient(context.Background(), bigquery.Scope)\n\tif err != nil {\n\t\tlog.Fatalf(\"Creating http client: %v\", err)\n\t}\n\n\tclient, err := bigquery.NewClient(httpClient, *project)\n\tif err != nil {\n\t\tlog.Fatalf(\"Creating bigquery client: %v\", err)\n\t}\n\n\tit, err := client.Read(&bigquery.Table{\n\t\tProjectID: *project,\n\t\tDatasetID: *dataset,\n\t\tTableID:   *table,\n\t})\n\n\tif err != nil 
{\n\t\tlog.Fatalf(\"Reading: %v\", err)\n\t}\n\n\t// one-space padding.\n\ttw := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)\n\n\tfor it.Next(context.Background()) {\n\t\tvar vals bigquery.ValueList\n\t\tif err := it.Get(&vals); err != nil {\n\t\t\tfmt.Printf(\"err calling get: %v\\n\", err)\n\t\t} else {\n\t\t\tsep := \"\"\n\t\t\tfor _, v := range vals {\n\t\t\t\tfmt.Fprintf(tw, \"%s%v\", sep, v)\n\t\t\t\tsep = \"\\t\"\n\t\t\t}\n\t\t\tfmt.Fprintf(tw, \"\\n\")\n\t\t}\n\t}\n\ttw.Flush()\n\n\tif err := it.Err(); err != nil {\n\t\tfmt.Printf(\"err reading: %v\\n\")\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/examples/pubsub/cmdline/main.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package main contains a simple command line tool for Cloud Pub/Sub\n// Cloud Pub/Sub docs: https://cloud.google.com/pubsub/docs\npackage main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/google\"\n\t\"google.golang.org/cloud\"\n\t\"google.golang.org/cloud/compute/metadata\"\n\t\"google.golang.org/cloud/pubsub\"\n)\n\nvar (\n\tjsonFile  = flag.String(\"j\", \"\", \"A path to your JSON key file for your service account downloaded from Google Developer Console, not needed if you run it on Compute Engine instances.\")\n\tprojID    = flag.String(\"p\", \"\", \"The ID of your Google Cloud project.\")\n\treportMPS = flag.Bool(\"report\", false, \"Reports the incoming/outgoing message rate in msg/sec if set.\")\n\tsize      = flag.Int(\"size\", 10, \"Batch size for pull_messages and publish_messages subcommands.\")\n)\n\nconst (\n\tusage = `Available arguments are:\n    create_topic <name>\n    delete_topic <name>\n    create_subscription <name> <linked_topic>\n    delete_subscription <name>\n    publish <topic> <message>\n    pull_messages <subscription> <numworkers>\n    publish_messages <topic> <numworkers>\n`\n\ttick = 1 * time.Second\n)\n\nfunc usageAndExit(msg 
string) {\n\tfmt.Fprintln(os.Stderr, msg)\n\tfmt.Println(\"Flags:\")\n\tflag.PrintDefaults()\n\tfmt.Fprint(os.Stderr, usage)\n\tos.Exit(2)\n}\n\n// Check the length of the arguments.\nfunc checkArgs(argv []string, min int) {\n\tif len(argv) < min {\n\t\tusageAndExit(\"Missing arguments\")\n\t}\n}\n\n// newClient creates http.Client with a jwt service account when\n// jsonFile flag is specified, otherwise by obtaining the GCE service\n// account's access token.\nfunc newClient(jsonFile string) (*http.Client, error) {\n\tif jsonFile != \"\" {\n\t\tjsonKey, err := ioutil.ReadFile(jsonFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconf, err := google.JWTConfigFromJSON(jsonKey, pubsub.ScopePubSub)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn conf.Client(oauth2.NoContext), nil\n\t}\n\tif metadata.OnGCE() {\n\t\tc := &http.Client{\n\t\t\tTransport: &oauth2.Transport{\n\t\t\t\tSource: google.ComputeTokenSource(\"\"),\n\t\t\t},\n\t\t}\n\t\tif *projID == \"\" {\n\t\t\tprojectID, err := metadata.ProjectID()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"ProjectID failed, %v\", err)\n\t\t\t}\n\t\t\t*projID = projectID\n\t\t}\n\t\treturn c, nil\n\t}\n\treturn nil, errors.New(\"Could not create an authenticated client.\")\n}\n\nfunc listTopics(ctx context.Context, argv []string) {\n\tpanic(\"listTopics not implemented yet\")\n}\n\nfunc createTopic(ctx context.Context, argv []string) {\n\tcheckArgs(argv, 2)\n\ttopic := argv[1]\n\terr := pubsub.CreateTopic(ctx, topic)\n\tif err != nil {\n\t\tlog.Fatalf(\"CreateTopic failed, %v\", err)\n\t}\n\tfmt.Printf(\"Topic %s was created.\\n\", topic)\n}\n\nfunc deleteTopic(ctx context.Context, argv []string) {\n\tcheckArgs(argv, 2)\n\ttopic := argv[1]\n\terr := pubsub.DeleteTopic(ctx, topic)\n\tif err != nil {\n\t\tlog.Fatalf(\"DeleteTopic failed, %v\", err)\n\t}\n\tfmt.Printf(\"Topic %s was deleted.\\n\", topic)\n}\n\nfunc listSubscriptions(ctx context.Context, argv []string) 
{\n\tpanic(\"listSubscriptions not implemented yet\")\n}\n\nfunc createSubscription(ctx context.Context, argv []string) {\n\tcheckArgs(argv, 3)\n\tsub := argv[1]\n\ttopic := argv[2]\n\terr := pubsub.CreateSub(ctx, sub, topic, 60*time.Second, \"\")\n\tif err != nil {\n\t\tlog.Fatalf(\"CreateSub failed, %v\", err)\n\t}\n\tfmt.Printf(\"Subscription %s was created.\\n\", sub)\n}\n\nfunc deleteSubscription(ctx context.Context, argv []string) {\n\tcheckArgs(argv, 2)\n\tsub := argv[1]\n\terr := pubsub.DeleteSub(ctx, sub)\n\tif err != nil {\n\t\tlog.Fatalf(\"DeleteSub failed, %v\", err)\n\t}\n\tfmt.Printf(\"Subscription %s was deleted.\\n\", sub)\n}\n\nfunc publish(ctx context.Context, argv []string) {\n\tcheckArgs(argv, 3)\n\ttopic := argv[1]\n\tmessage := argv[2]\n\tmsgIDs, err := pubsub.Publish(ctx, topic, &pubsub.Message{\n\t\tData: []byte(message),\n\t})\n\tif err != nil {\n\t\tlog.Fatalf(\"Publish failed, %v\", err)\n\t}\n\tfmt.Printf(\"Message '%s' published to a topic %s and the message id is %s\\n\", message, topic, msgIDs[0])\n}\n\ntype reporter struct {\n\treportTitle string\n\tlastC       uint64\n\tc           uint64\n\tresult      <-chan int\n}\n\nfunc (r *reporter) report() {\n\tticker := time.NewTicker(tick)\n\tdefer func() {\n\t\tticker.Stop()\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tn := r.c - r.lastC\n\t\t\tr.lastC = r.c\n\t\t\tmps := n / uint64(tick/time.Second)\n\t\t\tlog.Printf(\"%s ~%d msgs/s, total: %d\", r.reportTitle, mps, r.c)\n\t\tcase n := <-r.result:\n\t\t\tr.c += uint64(n)\n\t\t}\n\t}\n}\n\nfunc ack(ctx context.Context, sub string, ackID ...string) {\n\terr := pubsub.Ack(ctx, sub, ackID...)\n\tif err != nil {\n\t\tlog.Printf(\"Ack failed, %v\\n\", err)\n\t}\n}\n\nfunc pullLoop(ctx context.Context, sub string, result chan<- int) {\n\tfor {\n\t\tmsgs, err := pubsub.PullWait(ctx, sub, *size)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"PullWait failed, %v\\n\", err)\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tif 
len(msgs) == 0 {\n\t\t\tlog.Println(\"Received no messages\")\n\t\t\tcontinue\n\t\t}\n\t\tif *reportMPS {\n\t\t\tresult <- len(msgs)\n\t\t}\n\t\tackIDs := make([]string, len(msgs))\n\t\tfor i, msg := range msgs {\n\t\t\tif !*reportMPS {\n\t\t\t\tfmt.Printf(\"Got a message: %s\\n\", msg.Data)\n\t\t\t}\n\t\t\tackIDs[i] = msg.AckID\n\t\t}\n\t\tgo ack(ctx, sub, ackIDs...)\n\t}\n}\n\nfunc pullMessages(ctx context.Context, argv []string) {\n\tcheckArgs(argv, 3)\n\tsub := argv[1]\n\tworkers, err := strconv.Atoi(argv[2])\n\tif err != nil {\n\t\tlog.Fatalf(\"Atoi failed, %v\", err)\n\t}\n\tresult := make(chan int, 1024)\n\tfor i := 0; i < int(workers); i++ {\n\t\tgo pullLoop(ctx, sub, result)\n\t}\n\tif *reportMPS {\n\t\tr := reporter{reportTitle: \"Received\", result: result}\n\t\tr.report()\n\t} else {\n\t\tselect {}\n\t}\n}\n\nfunc publishLoop(ctx context.Context, topic string, workerid int, result chan<- int) {\n\tvar r uint64\n\tfor {\n\t\tmsgs := make([]*pubsub.Message, *size)\n\t\tfor i := 0; i < *size; i++ {\n\t\t\tmsgs[i] = &pubsub.Message{\n\t\t\t\tData: []byte(fmt.Sprintf(\"Worker: %d, Round: %d, Message: %d\", workerid, r, i)),\n\t\t\t}\n\t\t}\n\t\t_, err := pubsub.Publish(ctx, topic, msgs...)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Publish failed, %v\\n\", err)\n\t\t\treturn\n\t\t}\n\t\tr++\n\t\tif *reportMPS {\n\t\t\tresult <- *size\n\t\t}\n\t}\n}\n\nfunc publishMessages(ctx context.Context, argv []string) {\n\tcheckArgs(argv, 3)\n\ttopic := argv[1]\n\tworkers, err := strconv.Atoi(argv[2])\n\tif err != nil {\n\t\tlog.Fatalf(\"Atoi failed, %v\", err)\n\t}\n\tresult := make(chan int, 1024)\n\tfor i := 0; i < int(workers); i++ {\n\t\tgo publishLoop(ctx, topic, i, result)\n\t}\n\tif *reportMPS {\n\t\tr := reporter{reportTitle: \"Sent\", result: result}\n\t\tr.report()\n\t} else {\n\t\tselect {}\n\t}\n}\n\n// This example demonstrates calling the Cloud Pub/Sub API. As of 22\n// Oct 2014, the Cloud Pub/Sub API is only available if you're\n// whitelisted. 
If you're interested in using it, please apply for the\n// Limited Preview program at the following form:\n// http://goo.gl/Wql9HL\n//\n// Also, before running this example, be sure to enable Cloud Pub/Sub\n// service on your project in Developer Console at:\n// https://console.developers.google.com/\n//\n// Unless you run this sample on Compute Engine instance, please\n// create a new service account and download a JSON key file for it at\n// the developer console: https://console.developers.google.com/\n//\n// It has the following subcommands:\n//\n//  create_topic <name>\n//  delete_topic <name>\n//  create_subscription <name> <linked_topic>\n//  delete_subscription <name>\n//  publish <topic> <message>\n//  pull_messages <subscription> <numworkers>\n//  publish_messages <topic> <numworkers>\n//\n// You can choose any names for topic and subscription as long as they\n// follow the naming rule described at:\n// https://cloud.google.com/pubsub/overview#names\n//\n// You can create/delete topics/subscriptions by self-explanatory\n// subcommands.\n//\n// The \"publish\" subcommand is for publishing a single message to a\n// specified Cloud Pub/Sub topic.\n//\n// The \"pull_messages\" subcommand is for continuously pulling messages\n// from a specified Cloud Pub/Sub subscription with specified number\n// of workers.\n//\n// The \"publish_messages\" subcommand is for continuously publishing\n// messages to a specified Cloud Pub/Sub topic with specified number\n// of workers.\nfunc main() {\n\tflag.Parse()\n\targv := flag.Args()\n\tcheckArgs(argv, 1)\n\tclient, err := newClient(*jsonFile)\n\tif err != nil {\n\t\tlog.Fatalf(\"clientAndId failed, %v\", err)\n\t}\n\tif *projID == \"\" {\n\t\tusageAndExit(\"Please specify Project ID.\")\n\t}\n\tctx := cloud.NewContext(*projID, client)\n\tm := map[string]func(ctx context.Context, argv []string){\n\t\t\"create_topic\":        createTopic,\n\t\t\"delete_topic\":        deleteTopic,\n\t\t\"create_subscription\": 
createSubscription,\n\t\t\"delete_subscription\": deleteSubscription,\n\t\t\"publish\":             publish,\n\t\t\"pull_messages\":       pullMessages,\n\t\t\"publish_messages\":    publishMessages,\n\t}\n\tsubcommand := argv[0]\n\tf, ok := m[subcommand]\n\tif !ok {\n\t\tusageAndExit(fmt.Sprintf(\"Function not found for %s\", subcommand))\n\t}\n\tf(ctx, argv)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/examples/storage/appengine/app.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package gcsdemo is an example App Engine or Mananged VM app using the Google Cloud Storage API.\npackage gcsdemo\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"strings\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/google\"\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/file\"\n\t\"google.golang.org/appengine/log\"\n\t\"google.golang.org/appengine/urlfetch\"\n\t\"google.golang.org/cloud\"\n\t\"google.golang.org/cloud/storage\"\n)\n\n// bucket is a local cache of the app's default bucket name.\nvar bucket string // or: var bucket = \"<your-app-id>.appspot.com\"\n\nfunc init() {\n\thttp.HandleFunc(\"/\", handler)\n}\n\n// demo struct holds information needed to run the various demo functions.\ntype demo struct {\n\tc   context.Context\n\tw   http.ResponseWriter\n\tctx context.Context\n\t// cleanUp is a list of filenames that need cleaning up at the end of the demo.\n\tcleanUp []string\n\t// failed indicates that one or more of the demo steps failed.\n\tfailed bool\n}\n\nfunc (d *demo) errorf(format string, args ...interface{}) {\n\td.failed = true\n\tlog.Errorf(d.c, format, args...)\n}\n\n// handler is the main demo entry point that calls the GCS operations.\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path != 
\"/\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\tc := appengine.NewContext(r)\n\tif bucket == \"\" {\n\t\tvar err error\n\t\tif bucket, err = file.DefaultBucketName(c); err != nil {\n\t\t\tlog.Errorf(c, \"failed to get default GCS bucket name: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\thc := &http.Client{\n\t\tTransport: &oauth2.Transport{\n\t\t\tSource: google.AppEngineTokenSource(c, storage.ScopeFullControl),\n\t\t\tBase:   &urlfetch.Transport{Context: c},\n\t\t},\n\t}\n\tctx := cloud.NewContext(appengine.AppID(c), hc)\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\tfmt.Fprintf(w, \"Demo GCS Application running from Version: %v\\n\", appengine.VersionID(c))\n\tfmt.Fprintf(w, \"Using bucket name: %v\\n\\n\", bucket)\n\n\td := &demo{\n\t\tc:   c,\n\t\tw:   w,\n\t\tctx: ctx,\n\t}\n\n\tn := \"demo-testfile-go\"\n\td.createFile(n)\n\td.readFile(n)\n\td.copyFile(n)\n\td.statFile(n)\n\td.createListFiles()\n\td.listBucket()\n\td.listBucketDirMode()\n\td.defaultACL()\n\td.putDefaultACLRule()\n\td.deleteDefaultACLRule()\n\td.bucketACL()\n\td.putBucketACLRule()\n\td.deleteBucketACLRule()\n\td.acl(n)\n\td.putACLRule(n)\n\td.deleteACLRule(n)\n\td.deleteFiles()\n\n\tif d.failed {\n\t\tio.WriteString(w, \"\\nDemo failed.\\n\")\n\t} else {\n\t\tio.WriteString(w, \"\\nDemo succeeded.\\n\")\n\t}\n}\n\n// createFile creates a file in Google Cloud Storage.\nfunc (d *demo) createFile(fileName string) {\n\tfmt.Fprintf(d.w, \"Creating file /%v/%v\\n\", bucket, fileName)\n\n\twc := storage.NewWriter(d.ctx, bucket, fileName)\n\twc.ContentType = \"text/plain\"\n\twc.Metadata = map[string]string{\n\t\t\"x-goog-meta-foo\": \"foo\",\n\t\t\"x-goog-meta-bar\": \"bar\",\n\t}\n\td.cleanUp = append(d.cleanUp, fileName)\n\n\tif _, err := wc.Write([]byte(\"abcde\\n\")); err != nil {\n\t\td.errorf(\"createFile: unable to write data to bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\tif _, err := wc.Write([]byte(strings.Repeat(\"f\", 1024*4) + \"\\n\")); err 
!= nil {\n\t\td.errorf(\"createFile: unable to write data to bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\tif err := wc.Close(); err != nil {\n\t\td.errorf(\"createFile: unable to close bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n}\n\n// readFile reads the named file in Google Cloud Storage.\nfunc (d *demo) readFile(fileName string) {\n\tio.WriteString(d.w, \"\\nAbbreviated file content (first line and last 1K):\\n\")\n\n\trc, err := storage.NewReader(d.ctx, bucket, fileName)\n\tif err != nil {\n\t\td.errorf(\"readFile: unable to open file from bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\tdefer rc.Close()\n\tslurp, err := ioutil.ReadAll(rc)\n\tif err != nil {\n\t\td.errorf(\"readFile: unable to read data from bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\n\tfmt.Fprintf(d.w, \"%s\\n\", bytes.SplitN(slurp, []byte(\"\\n\"), 2)[0])\n\tif len(slurp) > 1024 {\n\t\tfmt.Fprintf(d.w, \"...%s\\n\", slurp[len(slurp)-1024:])\n\t} else {\n\t\tfmt.Fprintf(d.w, \"%s\\n\", slurp)\n\t}\n}\n\n// copyFile copies a file in Google Cloud Storage.\nfunc (d *demo) copyFile(fileName string) {\n\tcopyName := fileName + \"-copy\"\n\tfmt.Fprintf(d.w, \"Copying file /%v/%v to /%v/%v:\\n\", bucket, fileName, bucket, copyName)\n\n\tobj, err := storage.CopyObject(d.ctx, bucket, fileName, bucket, copyName, nil)\n\tif err != nil {\n\t\td.errorf(\"copyFile: unable to copy /%v/%v to bucket %q, file %q: %v\", bucket, fileName, bucket, copyName, err)\n\t\treturn\n\t}\n\td.cleanUp = append(d.cleanUp, copyName)\n\n\td.dumpStats(obj)\n}\n\nfunc (d *demo) dumpStats(obj *storage.Object) {\n\tfmt.Fprintf(d.w, \"(filename: /%v/%v, \", obj.Bucket, obj.Name)\n\tfmt.Fprintf(d.w, \"ContentType: %q, \", obj.ContentType)\n\tfmt.Fprintf(d.w, \"ACL: %#v, \", obj.ACL)\n\tfmt.Fprintf(d.w, \"Owner: %v, \", obj.Owner)\n\tfmt.Fprintf(d.w, \"ContentEncoding: %q, \", obj.ContentEncoding)\n\tfmt.Fprintf(d.w, \"Size: %v, \", 
obj.Size)\n\tfmt.Fprintf(d.w, \"MD5: %q, \", obj.MD5)\n\tfmt.Fprintf(d.w, \"CRC32C: %q, \", obj.CRC32C)\n\tfmt.Fprintf(d.w, \"Metadata: %#v, \", obj.Metadata)\n\tfmt.Fprintf(d.w, \"MediaLink: %q, \", obj.MediaLink)\n\tfmt.Fprintf(d.w, \"StorageClass: %q, \", obj.StorageClass)\n\tif !obj.Deleted.IsZero() {\n\t\tfmt.Fprintf(d.w, \"Deleted: %v, \", obj.Deleted)\n\t}\n\tfmt.Fprintf(d.w, \"Updated: %v)\\n\", obj.Updated)\n}\n\n// statFile reads the stats of the named file in Google Cloud Storage.\nfunc (d *demo) statFile(fileName string) {\n\tio.WriteString(d.w, \"\\nFile stat:\\n\")\n\n\tobj, err := storage.StatObject(d.ctx, bucket, fileName)\n\tif err != nil {\n\t\td.errorf(\"statFile: unable to stat file from bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\n\td.dumpStats(obj)\n}\n\n// createListFiles creates files that will be used by listBucket.\nfunc (d *demo) createListFiles() {\n\tio.WriteString(d.w, \"\\nCreating more files for listbucket...\\n\")\n\tfor _, n := range []string{\"foo1\", \"foo2\", \"bar\", \"bar/1\", \"bar/2\", \"boo/\"} {\n\t\td.createFile(n)\n\t}\n}\n\n// listBucket lists the contents of a bucket in Google Cloud Storage.\nfunc (d *demo) listBucket() {\n\tio.WriteString(d.w, \"\\nListbucket result:\\n\")\n\n\tquery := &storage.Query{Prefix: \"foo\"}\n\tfor query != nil {\n\t\tobjs, err := storage.ListObjects(d.ctx, bucket, query)\n\t\tif err != nil {\n\t\t\td.errorf(\"listBucket: unable to list bucket %q: %v\", bucket, err)\n\t\t\treturn\n\t\t}\n\t\tquery = objs.Next\n\n\t\tfor _, obj := range objs.Results {\n\t\t\td.dumpStats(obj)\n\t\t}\n\t}\n}\n\nfunc (d *demo) listDir(name, indent string) {\n\tquery := &storage.Query{Prefix: name, Delimiter: \"/\"}\n\tfor query != nil {\n\t\tobjs, err := storage.ListObjects(d.ctx, bucket, query)\n\t\tif err != nil {\n\t\t\td.errorf(\"listBucketDirMode: unable to list bucket %q: %v\", bucket, err)\n\t\t\treturn\n\t\t}\n\t\tquery = objs.Next\n\n\t\tfor _, obj := range objs.Results 
{\n\t\t\tfmt.Fprint(d.w, indent)\n\t\t\td.dumpStats(obj)\n\t\t}\n\t\tfor _, dir := range objs.Prefixes {\n\t\t\tfmt.Fprintf(d.w, \"%v(directory: /%v/%v)\\n\", indent, bucket, dir)\n\t\t\td.listDir(dir, indent+\"  \")\n\t\t}\n\t}\n}\n\n// listBucketDirMode lists the contents of a bucket in dir mode in Google Cloud Storage.\nfunc (d *demo) listBucketDirMode() {\n\tio.WriteString(d.w, \"\\nListbucket directory mode result:\\n\")\n\td.listDir(\"b\", \"\")\n}\n\n// dumpDefaultACL prints out the default object ACL for this bucket.\nfunc (d *demo) dumpDefaultACL() {\n\tacl, err := storage.DefaultACL(d.ctx, bucket)\n\tif err != nil {\n\t\td.errorf(\"defaultACL: unable to list default object ACL for bucket %q: %v\", bucket, err)\n\t\treturn\n\t}\n\tfor _, v := range acl {\n\t\tfmt.Fprintf(d.w, \"Entity: %q, Role: %q\\n\", v.Entity, v.Role)\n\t}\n}\n\n// defaultACL displays the default object ACL for this bucket.\nfunc (d *demo) defaultACL() {\n\tio.WriteString(d.w, \"\\nDefault object ACL:\\n\")\n\td.dumpDefaultACL()\n}\n\n// putDefaultACLRule adds the \"allUsers\" default object ACL rule for this bucket.\nfunc (d *demo) putDefaultACLRule() {\n\tio.WriteString(d.w, \"\\nPut Default object ACL Rule:\\n\")\n\terr := storage.PutDefaultACLRule(d.ctx, bucket, \"allUsers\", storage.RoleReader)\n\tif err != nil {\n\t\td.errorf(\"putDefaultACLRule: unable to save default object ACL rule for bucket %q: %v\", bucket, err)\n\t\treturn\n\t}\n\td.dumpDefaultACL()\n}\n\n// deleteDefaultACLRule deleted the \"allUsers\" default object ACL rule for this bucket.\nfunc (d *demo) deleteDefaultACLRule() {\n\tio.WriteString(d.w, \"\\nDelete Default object ACL Rule:\\n\")\n\terr := storage.DeleteDefaultACLRule(d.ctx, bucket, \"allUsers\")\n\tif err != nil {\n\t\td.errorf(\"deleteDefaultACLRule: unable to delete default object ACL rule for bucket %q: %v\", bucket, err)\n\t\treturn\n\t}\n\td.dumpDefaultACL()\n}\n\n// dumpBucketACL prints out the bucket ACL.\nfunc (d *demo) dumpBucketACL() {\n\tacl, 
err := storage.BucketACL(d.ctx, bucket)\n\tif err != nil {\n\t\td.errorf(\"dumpBucketACL: unable to list bucket ACL for bucket %q: %v\", bucket, err)\n\t\treturn\n\t}\n\tfor _, v := range acl {\n\t\tfmt.Fprintf(d.w, \"Entity: %q, Role: %q\\n\", v.Entity, v.Role)\n\t}\n}\n\n// bucketACL displays the bucket ACL for this bucket.\nfunc (d *demo) bucketACL() {\n\tio.WriteString(d.w, \"\\nBucket ACL:\\n\")\n\td.dumpBucketACL()\n}\n\n// putBucketACLRule adds the \"allUsers\" bucket ACL rule for this bucket.\nfunc (d *demo) putBucketACLRule() {\n\tio.WriteString(d.w, \"\\nPut Bucket ACL Rule:\\n\")\n\terr := storage.PutBucketACLRule(d.ctx, bucket, \"allUsers\", storage.RoleReader)\n\tif err != nil {\n\t\td.errorf(\"putBucketACLRule: unable to save bucket ACL rule for bucket %q: %v\", bucket, err)\n\t\treturn\n\t}\n\td.dumpBucketACL()\n}\n\n// deleteBucketACLRule deleted the \"allUsers\" bucket ACL rule for this bucket.\nfunc (d *demo) deleteBucketACLRule() {\n\tio.WriteString(d.w, \"\\nDelete Bucket ACL Rule:\\n\")\n\terr := storage.DeleteBucketACLRule(d.ctx, bucket, \"allUsers\")\n\tif err != nil {\n\t\td.errorf(\"deleteBucketACLRule: unable to delete bucket ACL rule for bucket %q: %v\", bucket, err)\n\t\treturn\n\t}\n\td.dumpBucketACL()\n}\n\n// dumpACL prints out the ACL of the named file.\nfunc (d *demo) dumpACL(fileName string) {\n\tacl, err := storage.ACL(d.ctx, bucket, fileName)\n\tif err != nil {\n\t\td.errorf(\"dumpACL: unable to list file ACL for bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\tfor _, v := range acl {\n\t\tfmt.Fprintf(d.w, \"Entity: %q, Role: %q\\n\", v.Entity, v.Role)\n\t}\n}\n\n// acl displays the ACL for the named file.\nfunc (d *demo) acl(fileName string) {\n\tfmt.Fprintf(d.w, \"\\nACL for file %v:\\n\", fileName)\n\td.dumpACL(fileName)\n}\n\n// putACLRule adds the \"allUsers\" ACL rule for the named file.\nfunc (d *demo) putACLRule(fileName string) {\n\tfmt.Fprintf(d.w, \"\\nPut ACL rule for file %v:\\n\", 
fileName)\n\terr := storage.PutACLRule(d.ctx, bucket, fileName, \"allUsers\", storage.RoleReader)\n\tif err != nil {\n\t\td.errorf(\"putACLRule: unable to save ACL rule for bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\td.dumpACL(fileName)\n}\n\n// deleteACLRule deleted the \"allUsers\" ACL rule for the named file.\nfunc (d *demo) deleteACLRule(fileName string) {\n\tfmt.Fprintf(d.w, \"\\nDelete ACL rule for file %v:\\n\", fileName)\n\terr := storage.DeleteACLRule(d.ctx, bucket, fileName, \"allUsers\")\n\tif err != nil {\n\t\td.errorf(\"deleteACLRule: unable to delete ACL rule for bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\td.dumpACL(fileName)\n}\n\n// deleteFiles deletes all the temporary files from a bucket created by this demo.\nfunc (d *demo) deleteFiles() {\n\tio.WriteString(d.w, \"\\nDeleting files...\\n\")\n\tfor _, v := range d.cleanUp {\n\t\tfmt.Fprintf(d.w, \"Deleting file %v\\n\", v)\n\t\tif err := storage.DeleteObject(d.ctx, bucket, v); err != nil {\n\t\t\td.errorf(\"deleteFiles: unable to delete bucket %q, file %q: %v\", bucket, v, err)\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/examples/storage/appengine/app.yaml",
    "content": "application: <your-app-id-here>\nversion: v1\nruntime: go\napi_version: go1\n\nhandlers:\n- url: /.*\n  script: _go_app\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/examples/storage/appenginevm/app.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package gcsdemo is an example App Engine or Mananged VM app using the Google Cloud Storage API.\npackage gcsdemo\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"strings\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/google\"\n\t\"google.golang.org/appengine\"\n\t\"google.golang.org/appengine/file\"\n\t\"google.golang.org/appengine/log\"\n\t\"google.golang.org/appengine/urlfetch\"\n\t\"google.golang.org/cloud\"\n\t\"google.golang.org/cloud/storage\"\n)\n\n// bucket is a local cache of the app's default bucket name.\nvar bucket string // or: var bucket = \"<your-app-id>.appspot.com\"\n\nfunc init() {\n\thttp.HandleFunc(\"/\", handler)\n}\n\n// demo struct holds information needed to run the various demo functions.\ntype demo struct {\n\tc   context.Context\n\tw   http.ResponseWriter\n\tctx context.Context\n\t// cleanUp is a list of filenames that need cleaning up at the end of the demo.\n\tcleanUp []string\n\t// failed indicates that one or more of the demo steps failed.\n\tfailed bool\n}\n\nfunc (d *demo) errorf(format string, args ...interface{}) {\n\td.failed = true\n\tlog.Errorf(d.c, format, args...)\n}\n\n// handler is the main demo entry point that calls the GCS operations.\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path != 
\"/\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\tc := appengine.NewContext(r)\n\tif bucket == \"\" {\n\t\tvar err error\n\t\tif bucket, err = file.DefaultBucketName(c); err != nil {\n\t\t\tlog.Errorf(c, \"failed to get default GCS bucket name: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\thc := &http.Client{\n\t\tTransport: &oauth2.Transport{\n\t\t\tSource: google.AppEngineTokenSource(c, storage.ScopeFullControl),\n\t\t\tBase:   &urlfetch.Transport{Context: c},\n\t\t},\n\t}\n\tctx := cloud.NewContext(appengine.AppID(c), hc)\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\tfmt.Fprintf(w, \"Demo GCS Application running from Version: %v\\n\", appengine.VersionID(c))\n\tfmt.Fprintf(w, \"Using bucket name: %v\\n\\n\", bucket)\n\n\td := &demo{\n\t\tc:   c,\n\t\tw:   w,\n\t\tctx: ctx,\n\t}\n\n\tn := \"demo-testfile-go\"\n\td.createFile(n)\n\td.readFile(n)\n\td.copyFile(n)\n\td.statFile(n)\n\td.createListFiles()\n\td.listBucket()\n\td.listBucketDirMode()\n\td.defaultACL()\n\td.putDefaultACLRule()\n\td.deleteDefaultACLRule()\n\td.bucketACL()\n\td.putBucketACLRule()\n\td.deleteBucketACLRule()\n\td.acl(n)\n\td.putACLRule(n)\n\td.deleteACLRule(n)\n\td.deleteFiles()\n\n\tif d.failed {\n\t\tio.WriteString(w, \"\\nDemo failed.\\n\")\n\t} else {\n\t\tio.WriteString(w, \"\\nDemo succeeded.\\n\")\n\t}\n}\n\n// createFile creates a file in Google Cloud Storage.\nfunc (d *demo) createFile(fileName string) {\n\tfmt.Fprintf(d.w, \"Creating file /%v/%v\\n\", bucket, fileName)\n\n\twc := storage.NewWriter(d.ctx, bucket, fileName)\n\twc.ContentType = \"text/plain\"\n\twc.Metadata = map[string]string{\n\t\t\"x-goog-meta-foo\": \"foo\",\n\t\t\"x-goog-meta-bar\": \"bar\",\n\t}\n\td.cleanUp = append(d.cleanUp, fileName)\n\n\tif _, err := wc.Write([]byte(\"abcde\\n\")); err != nil {\n\t\td.errorf(\"createFile: unable to write data to bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\tif _, err := wc.Write([]byte(strings.Repeat(\"f\", 1024*4) + \"\\n\")); err 
!= nil {\n\t\td.errorf(\"createFile: unable to write data to bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\tif err := wc.Close(); err != nil {\n\t\td.errorf(\"createFile: unable to close bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n}\n\n// readFile reads the named file in Google Cloud Storage.\nfunc (d *demo) readFile(fileName string) {\n\tio.WriteString(d.w, \"\\nAbbreviated file content (first line and last 1K):\\n\")\n\n\trc, err := storage.NewReader(d.ctx, bucket, fileName)\n\tif err != nil {\n\t\td.errorf(\"readFile: unable to open file from bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\tdefer rc.Close()\n\tslurp, err := ioutil.ReadAll(rc)\n\tif err != nil {\n\t\td.errorf(\"readFile: unable to read data from bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\n\tfmt.Fprintf(d.w, \"%s\\n\", bytes.SplitN(slurp, []byte(\"\\n\"), 2)[0])\n\tif len(slurp) > 1024 {\n\t\tfmt.Fprintf(d.w, \"...%s\\n\", slurp[len(slurp)-1024:])\n\t} else {\n\t\tfmt.Fprintf(d.w, \"%s\\n\", slurp)\n\t}\n}\n\n// copyFile copies a file in Google Cloud Storage.\nfunc (d *demo) copyFile(fileName string) {\n\tcopyName := fileName + \"-copy\"\n\tfmt.Fprintf(d.w, \"Copying file /%v/%v to /%v/%v:\\n\", bucket, fileName, bucket, copyName)\n\n\tobj, err := storage.CopyObject(d.ctx, bucket, fileName, bucket, copyName, nil)\n\tif err != nil {\n\t\td.errorf(\"copyFile: unable to copy /%v/%v to bucket %q, file %q: %v\", bucket, fileName, bucket, copyName, err)\n\t\treturn\n\t}\n\td.cleanUp = append(d.cleanUp, copyName)\n\n\td.dumpStats(obj)\n}\n\nfunc (d *demo) dumpStats(obj *storage.Object) {\n\tfmt.Fprintf(d.w, \"(filename: /%v/%v, \", obj.Bucket, obj.Name)\n\tfmt.Fprintf(d.w, \"ContentType: %q, \", obj.ContentType)\n\tfmt.Fprintf(d.w, \"ACL: %#v, \", obj.ACL)\n\tfmt.Fprintf(d.w, \"Owner: %v, \", obj.Owner)\n\tfmt.Fprintf(d.w, \"ContentEncoding: %q, \", obj.ContentEncoding)\n\tfmt.Fprintf(d.w, \"Size: %v, \", 
obj.Size)\n\tfmt.Fprintf(d.w, \"MD5: %q, \", obj.MD5)\n\tfmt.Fprintf(d.w, \"CRC32C: %q, \", obj.CRC32C)\n\tfmt.Fprintf(d.w, \"Metadata: %#v, \", obj.Metadata)\n\tfmt.Fprintf(d.w, \"MediaLink: %q, \", obj.MediaLink)\n\tfmt.Fprintf(d.w, \"StorageClass: %q, \", obj.StorageClass)\n\tif !obj.Deleted.IsZero() {\n\t\tfmt.Fprintf(d.w, \"Deleted: %v, \", obj.Deleted)\n\t}\n\tfmt.Fprintf(d.w, \"Updated: %v)\\n\", obj.Updated)\n}\n\n// statFile reads the stats of the named file in Google Cloud Storage.\nfunc (d *demo) statFile(fileName string) {\n\tio.WriteString(d.w, \"\\nFile stat:\\n\")\n\n\tobj, err := storage.StatObject(d.ctx, bucket, fileName)\n\tif err != nil {\n\t\td.errorf(\"statFile: unable to stat file from bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\n\td.dumpStats(obj)\n}\n\n// createListFiles creates files that will be used by listBucket.\nfunc (d *demo) createListFiles() {\n\tio.WriteString(d.w, \"\\nCreating more files for listbucket...\\n\")\n\tfor _, n := range []string{\"foo1\", \"foo2\", \"bar\", \"bar/1\", \"bar/2\", \"boo/\"} {\n\t\td.createFile(n)\n\t}\n}\n\n// listBucket lists the contents of a bucket in Google Cloud Storage.\nfunc (d *demo) listBucket() {\n\tio.WriteString(d.w, \"\\nListbucket result:\\n\")\n\n\tquery := &storage.Query{Prefix: \"foo\"}\n\tfor query != nil {\n\t\tobjs, err := storage.ListObjects(d.ctx, bucket, query)\n\t\tif err != nil {\n\t\t\td.errorf(\"listBucket: unable to list bucket %q: %v\", bucket, err)\n\t\t\treturn\n\t\t}\n\t\tquery = objs.Next\n\n\t\tfor _, obj := range objs.Results {\n\t\t\td.dumpStats(obj)\n\t\t}\n\t}\n}\n\nfunc (d *demo) listDir(name, indent string) {\n\tquery := &storage.Query{Prefix: name, Delimiter: \"/\"}\n\tfor query != nil {\n\t\tobjs, err := storage.ListObjects(d.ctx, bucket, query)\n\t\tif err != nil {\n\t\t\td.errorf(\"listBucketDirMode: unable to list bucket %q: %v\", bucket, err)\n\t\t\treturn\n\t\t}\n\t\tquery = objs.Next\n\n\t\tfor _, obj := range objs.Results 
{\n\t\t\tfmt.Fprint(d.w, indent)\n\t\t\td.dumpStats(obj)\n\t\t}\n\t\tfor _, dir := range objs.Prefixes {\n\t\t\tfmt.Fprintf(d.w, \"%v(directory: /%v/%v)\\n\", indent, bucket, dir)\n\t\t\td.listDir(dir, indent+\"  \")\n\t\t}\n\t}\n}\n\n// listBucketDirMode lists the contents of a bucket in dir mode in Google Cloud Storage.\nfunc (d *demo) listBucketDirMode() {\n\tio.WriteString(d.w, \"\\nListbucket directory mode result:\\n\")\n\td.listDir(\"b\", \"\")\n}\n\n// dumpDefaultACL prints out the default object ACL for this bucket.\nfunc (d *demo) dumpDefaultACL() {\n\tacl, err := storage.DefaultACL(d.ctx, bucket)\n\tif err != nil {\n\t\td.errorf(\"defaultACL: unable to list default object ACL for bucket %q: %v\", bucket, err)\n\t\treturn\n\t}\n\tfor _, v := range acl {\n\t\tfmt.Fprintf(d.w, \"Entity: %q, Role: %q\\n\", v.Entity, v.Role)\n\t}\n}\n\n// defaultACL displays the default object ACL for this bucket.\nfunc (d *demo) defaultACL() {\n\tio.WriteString(d.w, \"\\nDefault object ACL:\\n\")\n\td.dumpDefaultACL()\n}\n\n// putDefaultACLRule adds the \"allUsers\" default object ACL rule for this bucket.\nfunc (d *demo) putDefaultACLRule() {\n\tio.WriteString(d.w, \"\\nPut Default object ACL Rule:\\n\")\n\terr := storage.PutDefaultACLRule(d.ctx, bucket, \"allUsers\", storage.RoleReader)\n\tif err != nil {\n\t\td.errorf(\"putDefaultACLRule: unable to save default object ACL rule for bucket %q: %v\", bucket, err)\n\t\treturn\n\t}\n\td.dumpDefaultACL()\n}\n\n// deleteDefaultACLRule deleted the \"allUsers\" default object ACL rule for this bucket.\nfunc (d *demo) deleteDefaultACLRule() {\n\tio.WriteString(d.w, \"\\nDelete Default object ACL Rule:\\n\")\n\terr := storage.DeleteDefaultACLRule(d.ctx, bucket, \"allUsers\")\n\tif err != nil {\n\t\td.errorf(\"deleteDefaultACLRule: unable to delete default object ACL rule for bucket %q: %v\", bucket, err)\n\t\treturn\n\t}\n\td.dumpDefaultACL()\n}\n\n// dumpBucketACL prints out the bucket ACL.\nfunc (d *demo) dumpBucketACL() {\n\tacl, 
err := storage.BucketACL(d.ctx, bucket)\n\tif err != nil {\n\t\td.errorf(\"dumpBucketACL: unable to list bucket ACL for bucket %q: %v\", bucket, err)\n\t\treturn\n\t}\n\tfor _, v := range acl {\n\t\tfmt.Fprintf(d.w, \"Entity: %q, Role: %q\\n\", v.Entity, v.Role)\n\t}\n}\n\n// bucketACL displays the bucket ACL for this bucket.\nfunc (d *demo) bucketACL() {\n\tio.WriteString(d.w, \"\\nBucket ACL:\\n\")\n\td.dumpBucketACL()\n}\n\n// putBucketACLRule adds the \"allUsers\" bucket ACL rule for this bucket.\nfunc (d *demo) putBucketACLRule() {\n\tio.WriteString(d.w, \"\\nPut Bucket ACL Rule:\\n\")\n\terr := storage.PutBucketACLRule(d.ctx, bucket, \"allUsers\", storage.RoleReader)\n\tif err != nil {\n\t\td.errorf(\"putBucketACLRule: unable to save bucket ACL rule for bucket %q: %v\", bucket, err)\n\t\treturn\n\t}\n\td.dumpBucketACL()\n}\n\n// deleteBucketACLRule deleted the \"allUsers\" bucket ACL rule for this bucket.\nfunc (d *demo) deleteBucketACLRule() {\n\tio.WriteString(d.w, \"\\nDelete Bucket ACL Rule:\\n\")\n\terr := storage.DeleteBucketACLRule(d.ctx, bucket, \"allUsers\")\n\tif err != nil {\n\t\td.errorf(\"deleteBucketACLRule: unable to delete bucket ACL rule for bucket %q: %v\", bucket, err)\n\t\treturn\n\t}\n\td.dumpBucketACL()\n}\n\n// dumpACL prints out the ACL of the named file.\nfunc (d *demo) dumpACL(fileName string) {\n\tacl, err := storage.ACL(d.ctx, bucket, fileName)\n\tif err != nil {\n\t\td.errorf(\"dumpACL: unable to list file ACL for bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\tfor _, v := range acl {\n\t\tfmt.Fprintf(d.w, \"Entity: %q, Role: %q\\n\", v.Entity, v.Role)\n\t}\n}\n\n// acl displays the ACL for the named file.\nfunc (d *demo) acl(fileName string) {\n\tfmt.Fprintf(d.w, \"\\nACL for file %v:\\n\", fileName)\n\td.dumpACL(fileName)\n}\n\n// putACLRule adds the \"allUsers\" ACL rule for the named file.\nfunc (d *demo) putACLRule(fileName string) {\n\tfmt.Fprintf(d.w, \"\\nPut ACL rule for file %v:\\n\", 
fileName)\n\terr := storage.PutACLRule(d.ctx, bucket, fileName, \"allUsers\", storage.RoleReader)\n\tif err != nil {\n\t\td.errorf(\"putACLRule: unable to save ACL rule for bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\td.dumpACL(fileName)\n}\n\n// deleteACLRule deleted the \"allUsers\" ACL rule for the named file.\nfunc (d *demo) deleteACLRule(fileName string) {\n\tfmt.Fprintf(d.w, \"\\nDelete ACL rule for file %v:\\n\", fileName)\n\terr := storage.DeleteACLRule(d.ctx, bucket, fileName, \"allUsers\")\n\tif err != nil {\n\t\td.errorf(\"deleteACLRule: unable to delete ACL rule for bucket %q, file %q: %v\", bucket, fileName, err)\n\t\treturn\n\t}\n\td.dumpACL(fileName)\n}\n\n// deleteFiles deletes all the temporary files from a bucket created by this demo.\nfunc (d *demo) deleteFiles() {\n\tio.WriteString(d.w, \"\\nDeleting files...\\n\")\n\tfor _, v := range d.cleanUp {\n\t\tfmt.Fprintf(d.w, \"Deleting file %v\\n\", v)\n\t\tif err := storage.DeleteObject(d.ctx, bucket, v); err != nil {\n\t\t\td.errorf(\"deleteFiles: unable to delete bucket %q, file %q: %v\", bucket, v, err)\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/examples/storage/appenginevm/app.yaml",
    "content": "application: <your-app-id-here>\nversion: v1\nruntime: go\napi_version: go1\nvm: true\n\nmanual_scaling:\n  instances: 1\n\nhandlers:\n- url: /.*\n  script: _go_app\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/internal/cloud.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package internal provides support for the cloud packages.\n//\n// Users should not import this package directly.\npackage internal\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"sync\"\n\n\t\"golang.org/x/net/context\"\n)\n\ntype contextKey struct{}\n\nfunc WithContext(parent context.Context, projID string, c *http.Client) context.Context {\n\tif c == nil {\n\t\tpanic(\"nil *http.Client passed to WithContext\")\n\t}\n\tif projID == \"\" {\n\t\tpanic(\"empty project ID passed to WithContext\")\n\t}\n\treturn context.WithValue(parent, contextKey{}, &cloudContext{\n\t\tProjectID:  projID,\n\t\tHTTPClient: c,\n\t})\n}\n\nconst userAgent = \"gcloud-golang/0.1\"\n\ntype cloudContext struct {\n\tProjectID  string\n\tHTTPClient *http.Client\n\n\tmu  sync.Mutex             // guards svc\n\tsvc map[string]interface{} // e.g. \"storage\" => *rawStorage.Service\n}\n\n// Service returns the result of the fill function if it's never been\n// called before for the given name (which is assumed to be an API\n// service name, like \"datastore\"). 
If it has already been cached, the fill\n// func is not run.\n// It's safe for concurrent use by multiple goroutines.\nfunc Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} {\n\treturn cc(ctx).service(name, fill)\n}\n\nfunc (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif c.svc == nil {\n\t\tc.svc = make(map[string]interface{})\n\t} else if v, ok := c.svc[name]; ok {\n\t\treturn v\n\t}\n\tv := fill(c.HTTPClient)\n\tc.svc[name] = v\n\treturn v\n}\n\n// Transport is an http.RoundTripper that appends\n// Google Cloud client's user-agent to the original\n// request's user-agent header.\ntype Transport struct {\n\t// Base represents the actual http.RoundTripper\n\t// the requests will be delegated to.\n\tBase http.RoundTripper\n}\n\n// RoundTrip appends a user-agent to the existing user-agent\n// header and delegates the request to the base http.RoundTripper.\nfunc (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {\n\treq = cloneRequest(req)\n\tua := req.Header.Get(\"User-Agent\")\n\tif ua == \"\" {\n\t\tua = userAgent\n\t} else {\n\t\tua = fmt.Sprintf(\"%s %s\", ua, userAgent)\n\t}\n\treq.Header.Set(\"User-Agent\", ua)\n\treturn t.Base.RoundTrip(req)\n}\n\n// cloneRequest returns a clone of the provided *http.Request.\n// The clone is a shallow copy of the struct and its Header map.\nfunc cloneRequest(r *http.Request) *http.Request {\n\t// shallow copy of the struct\n\tr2 := new(http.Request)\n\t*r2 = *r\n\t// deep copy of the Header\n\tr2.Header = make(http.Header)\n\tfor k, s := range r.Header {\n\t\tr2.Header[k] = s\n\t}\n\treturn r2\n}\n\nfunc ProjID(ctx context.Context) string {\n\treturn cc(ctx).ProjectID\n}\n\nfunc HTTPClient(ctx context.Context) *http.Client {\n\treturn cc(ctx).HTTPClient\n}\n\n// cc returns the internal *cloudContext (cc) state for a context.Context.\n// It panics if the user did it wrong.\nfunc 
cc(ctx context.Context) *cloudContext {\n\tif c, ok := ctx.Value(contextKey{}).(*cloudContext); ok {\n\t\treturn c\n\t}\n\tpanic(\"invalid context.Context type; it should be created with cloud.NewContext\")\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: datastore_v1.proto\n// DO NOT EDIT!\n\n/*\nPackage datastore is a generated protocol buffer package.\n\nIt is generated from these files:\n\tdatastore_v1.proto\n\nIt has these top-level messages:\n\tPartitionId\n\tKey\n\tValue\n\tProperty\n\tEntity\n\tEntityResult\n\tQuery\n\tKindExpression\n\tPropertyReference\n\tPropertyExpression\n\tPropertyOrder\n\tFilter\n\tCompositeFilter\n\tPropertyFilter\n\tGqlQuery\n\tGqlQueryArg\n\tQueryResultBatch\n\tMutation\n\tMutationResult\n\tReadOptions\n\tLookupRequest\n\tLookupResponse\n\tRunQueryRequest\n\tRunQueryResponse\n\tBeginTransactionRequest\n\tBeginTransactionResponse\n\tRollbackRequest\n\tRollbackResponse\n\tCommitRequest\n\tCommitResponse\n\tAllocateIdsRequest\n\tAllocateIdsResponse\n*/\npackage datastore\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = math.Inf\n\n// Specifies what data the 'entity' field contains.\n// A ResultType is either implied (for example, in LookupResponse.found it\n// is always FULL) or specified by context (for example, in message\n// QueryResultBatch, field 'entity_result_type' specifies a ResultType\n// for all the values in field 'entity_result').\ntype EntityResult_ResultType int32\n\nconst (\n\tEntityResult_FULL       EntityResult_ResultType = 1\n\tEntityResult_PROJECTION EntityResult_ResultType = 2\n\t// The entity may have no key.\n\t// A property value may have meaning 18.\n\tEntityResult_KEY_ONLY EntityResult_ResultType = 3\n)\n\nvar EntityResult_ResultType_name = map[int32]string{\n\t1: \"FULL\",\n\t2: \"PROJECTION\",\n\t3: \"KEY_ONLY\",\n}\nvar EntityResult_ResultType_value = map[string]int32{\n\t\"FULL\":       1,\n\t\"PROJECTION\": 2,\n\t\"KEY_ONLY\":   3,\n}\n\nfunc (x EntityResult_ResultType) Enum() *EntityResult_ResultType {\n\tp := new(EntityResult_ResultType)\n\t*p = x\n\treturn 
p\n}\nfunc (x EntityResult_ResultType) String() string {\n\treturn proto.EnumName(EntityResult_ResultType_name, int32(x))\n}\nfunc (x *EntityResult_ResultType) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(EntityResult_ResultType_value, data, \"EntityResult_ResultType\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = EntityResult_ResultType(value)\n\treturn nil\n}\n\ntype PropertyExpression_AggregationFunction int32\n\nconst (\n\tPropertyExpression_FIRST PropertyExpression_AggregationFunction = 1\n)\n\nvar PropertyExpression_AggregationFunction_name = map[int32]string{\n\t1: \"FIRST\",\n}\nvar PropertyExpression_AggregationFunction_value = map[string]int32{\n\t\"FIRST\": 1,\n}\n\nfunc (x PropertyExpression_AggregationFunction) Enum() *PropertyExpression_AggregationFunction {\n\tp := new(PropertyExpression_AggregationFunction)\n\t*p = x\n\treturn p\n}\nfunc (x PropertyExpression_AggregationFunction) String() string {\n\treturn proto.EnumName(PropertyExpression_AggregationFunction_name, int32(x))\n}\nfunc (x *PropertyExpression_AggregationFunction) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(PropertyExpression_AggregationFunction_value, data, \"PropertyExpression_AggregationFunction\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = PropertyExpression_AggregationFunction(value)\n\treturn nil\n}\n\ntype PropertyOrder_Direction int32\n\nconst (\n\tPropertyOrder_ASCENDING  PropertyOrder_Direction = 1\n\tPropertyOrder_DESCENDING PropertyOrder_Direction = 2\n)\n\nvar PropertyOrder_Direction_name = map[int32]string{\n\t1: \"ASCENDING\",\n\t2: \"DESCENDING\",\n}\nvar PropertyOrder_Direction_value = map[string]int32{\n\t\"ASCENDING\":  1,\n\t\"DESCENDING\": 2,\n}\n\nfunc (x PropertyOrder_Direction) Enum() *PropertyOrder_Direction {\n\tp := new(PropertyOrder_Direction)\n\t*p = x\n\treturn p\n}\nfunc (x PropertyOrder_Direction) String() string {\n\treturn proto.EnumName(PropertyOrder_Direction_name, 
int32(x))\n}\nfunc (x *PropertyOrder_Direction) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(PropertyOrder_Direction_value, data, \"PropertyOrder_Direction\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = PropertyOrder_Direction(value)\n\treturn nil\n}\n\ntype CompositeFilter_Operator int32\n\nconst (\n\tCompositeFilter_AND CompositeFilter_Operator = 1\n)\n\nvar CompositeFilter_Operator_name = map[int32]string{\n\t1: \"AND\",\n}\nvar CompositeFilter_Operator_value = map[string]int32{\n\t\"AND\": 1,\n}\n\nfunc (x CompositeFilter_Operator) Enum() *CompositeFilter_Operator {\n\tp := new(CompositeFilter_Operator)\n\t*p = x\n\treturn p\n}\nfunc (x CompositeFilter_Operator) String() string {\n\treturn proto.EnumName(CompositeFilter_Operator_name, int32(x))\n}\nfunc (x *CompositeFilter_Operator) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(CompositeFilter_Operator_value, data, \"CompositeFilter_Operator\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CompositeFilter_Operator(value)\n\treturn nil\n}\n\ntype PropertyFilter_Operator int32\n\nconst (\n\tPropertyFilter_LESS_THAN             PropertyFilter_Operator = 1\n\tPropertyFilter_LESS_THAN_OR_EQUAL    PropertyFilter_Operator = 2\n\tPropertyFilter_GREATER_THAN          PropertyFilter_Operator = 3\n\tPropertyFilter_GREATER_THAN_OR_EQUAL PropertyFilter_Operator = 4\n\tPropertyFilter_EQUAL                 PropertyFilter_Operator = 5\n\tPropertyFilter_HAS_ANCESTOR          PropertyFilter_Operator = 11\n)\n\nvar PropertyFilter_Operator_name = map[int32]string{\n\t1:  \"LESS_THAN\",\n\t2:  \"LESS_THAN_OR_EQUAL\",\n\t3:  \"GREATER_THAN\",\n\t4:  \"GREATER_THAN_OR_EQUAL\",\n\t5:  \"EQUAL\",\n\t11: \"HAS_ANCESTOR\",\n}\nvar PropertyFilter_Operator_value = map[string]int32{\n\t\"LESS_THAN\":             1,\n\t\"LESS_THAN_OR_EQUAL\":    2,\n\t\"GREATER_THAN\":          3,\n\t\"GREATER_THAN_OR_EQUAL\": 4,\n\t\"EQUAL\":                 5,\n\t\"HAS_ANCESTOR\":          
11,\n}\n\nfunc (x PropertyFilter_Operator) Enum() *PropertyFilter_Operator {\n\tp := new(PropertyFilter_Operator)\n\t*p = x\n\treturn p\n}\nfunc (x PropertyFilter_Operator) String() string {\n\treturn proto.EnumName(PropertyFilter_Operator_name, int32(x))\n}\nfunc (x *PropertyFilter_Operator) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(PropertyFilter_Operator_value, data, \"PropertyFilter_Operator\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = PropertyFilter_Operator(value)\n\treturn nil\n}\n\n// The possible values for the 'more_results' field.\ntype QueryResultBatch_MoreResultsType int32\n\nconst (\n\tQueryResultBatch_NOT_FINISHED             QueryResultBatch_MoreResultsType = 1\n\tQueryResultBatch_MORE_RESULTS_AFTER_LIMIT QueryResultBatch_MoreResultsType = 2\n\t// results after the limit.\n\tQueryResultBatch_NO_MORE_RESULTS QueryResultBatch_MoreResultsType = 3\n)\n\nvar QueryResultBatch_MoreResultsType_name = map[int32]string{\n\t1: \"NOT_FINISHED\",\n\t2: \"MORE_RESULTS_AFTER_LIMIT\",\n\t3: \"NO_MORE_RESULTS\",\n}\nvar QueryResultBatch_MoreResultsType_value = map[string]int32{\n\t\"NOT_FINISHED\":             1,\n\t\"MORE_RESULTS_AFTER_LIMIT\": 2,\n\t\"NO_MORE_RESULTS\":          3,\n}\n\nfunc (x QueryResultBatch_MoreResultsType) Enum() *QueryResultBatch_MoreResultsType {\n\tp := new(QueryResultBatch_MoreResultsType)\n\t*p = x\n\treturn p\n}\nfunc (x QueryResultBatch_MoreResultsType) String() string {\n\treturn proto.EnumName(QueryResultBatch_MoreResultsType_name, int32(x))\n}\nfunc (x *QueryResultBatch_MoreResultsType) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(QueryResultBatch_MoreResultsType_value, data, \"QueryResultBatch_MoreResultsType\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = QueryResultBatch_MoreResultsType(value)\n\treturn nil\n}\n\ntype ReadOptions_ReadConsistency int32\n\nconst (\n\tReadOptions_DEFAULT  ReadOptions_ReadConsistency = 0\n\tReadOptions_STRONG   
ReadOptions_ReadConsistency = 1\n\tReadOptions_EVENTUAL ReadOptions_ReadConsistency = 2\n)\n\nvar ReadOptions_ReadConsistency_name = map[int32]string{\n\t0: \"DEFAULT\",\n\t1: \"STRONG\",\n\t2: \"EVENTUAL\",\n}\nvar ReadOptions_ReadConsistency_value = map[string]int32{\n\t\"DEFAULT\":  0,\n\t\"STRONG\":   1,\n\t\"EVENTUAL\": 2,\n}\n\nfunc (x ReadOptions_ReadConsistency) Enum() *ReadOptions_ReadConsistency {\n\tp := new(ReadOptions_ReadConsistency)\n\t*p = x\n\treturn p\n}\nfunc (x ReadOptions_ReadConsistency) String() string {\n\treturn proto.EnumName(ReadOptions_ReadConsistency_name, int32(x))\n}\nfunc (x *ReadOptions_ReadConsistency) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(ReadOptions_ReadConsistency_value, data, \"ReadOptions_ReadConsistency\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = ReadOptions_ReadConsistency(value)\n\treturn nil\n}\n\ntype BeginTransactionRequest_IsolationLevel int32\n\nconst (\n\tBeginTransactionRequest_SNAPSHOT BeginTransactionRequest_IsolationLevel = 0\n\t// conflict if their mutations conflict. 
For example:\n\t// Read(A),Write(B) may not conflict with Read(B),Write(A),\n\t// but Read(B),Write(B) does conflict with Read(B),Write(B).\n\tBeginTransactionRequest_SERIALIZABLE BeginTransactionRequest_IsolationLevel = 1\n)\n\nvar BeginTransactionRequest_IsolationLevel_name = map[int32]string{\n\t0: \"SNAPSHOT\",\n\t1: \"SERIALIZABLE\",\n}\nvar BeginTransactionRequest_IsolationLevel_value = map[string]int32{\n\t\"SNAPSHOT\":     0,\n\t\"SERIALIZABLE\": 1,\n}\n\nfunc (x BeginTransactionRequest_IsolationLevel) Enum() *BeginTransactionRequest_IsolationLevel {\n\tp := new(BeginTransactionRequest_IsolationLevel)\n\t*p = x\n\treturn p\n}\nfunc (x BeginTransactionRequest_IsolationLevel) String() string {\n\treturn proto.EnumName(BeginTransactionRequest_IsolationLevel_name, int32(x))\n}\nfunc (x *BeginTransactionRequest_IsolationLevel) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_IsolationLevel_value, data, \"BeginTransactionRequest_IsolationLevel\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = BeginTransactionRequest_IsolationLevel(value)\n\treturn nil\n}\n\ntype CommitRequest_Mode int32\n\nconst (\n\tCommitRequest_TRANSACTIONAL     CommitRequest_Mode = 1\n\tCommitRequest_NON_TRANSACTIONAL CommitRequest_Mode = 2\n)\n\nvar CommitRequest_Mode_name = map[int32]string{\n\t1: \"TRANSACTIONAL\",\n\t2: \"NON_TRANSACTIONAL\",\n}\nvar CommitRequest_Mode_value = map[string]int32{\n\t\"TRANSACTIONAL\":     1,\n\t\"NON_TRANSACTIONAL\": 2,\n}\n\nfunc (x CommitRequest_Mode) Enum() *CommitRequest_Mode {\n\tp := new(CommitRequest_Mode)\n\t*p = x\n\treturn p\n}\nfunc (x CommitRequest_Mode) String() string {\n\treturn proto.EnumName(CommitRequest_Mode_name, int32(x))\n}\nfunc (x *CommitRequest_Mode) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(CommitRequest_Mode_value, data, \"CommitRequest_Mode\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CommitRequest_Mode(value)\n\treturn nil\n}\n\n// An 
identifier for a particular subset of entities.\n//\n// Entities are partitioned into various subsets, each used by different\n// datasets and different namespaces within a dataset and so forth.\n//\n// All input partition IDs are normalized before use.\n// A partition ID is normalized as follows:\n//   If the partition ID is unset or is set to an empty partition ID, replace it\n//       with the context partition ID.\n//   Otherwise, if the partition ID has no dataset ID, assign it the context\n//       partition ID's dataset ID.\n// Unless otherwise documented, the context partition ID has the dataset ID set\n// to the context dataset ID and no other partition dimension set.\n//\n// A partition ID is empty if all of its fields are unset.\n//\n// Partition dimension:\n// A dimension may be unset.\n// A dimension's value must never be \"\".\n// A dimension's value must match [A-Za-z\\d\\.\\-_]{1,100}\n// If the value of any dimension matches regex \"__.*__\",\n// the partition is reserved/read-only.\n// A reserved/read-only partition ID is forbidden in certain documented contexts.\n//\n// Dataset ID:\n// A dataset id's value must never be \"\".\n// A dataset id's value must match\n// ([a-z\\d\\-]{1,100}~)?([a-z\\d][a-z\\d\\-\\.]{0,99}:)?([a-z\\d][a-z\\d\\-]{0,99}\ntype PartitionId struct {\n\t// The dataset ID.\n\tDatasetId *string `protobuf:\"bytes,3,opt,name=dataset_id\" json:\"dataset_id,omitempty\"`\n\t// The namespace.\n\tNamespace        *string `protobuf:\"bytes,4,opt,name=namespace\" json:\"namespace,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *PartitionId) Reset()         { *m = PartitionId{} }\nfunc (m *PartitionId) String() string { return proto.CompactTextString(m) }\nfunc (*PartitionId) ProtoMessage()    {}\n\nfunc (m *PartitionId) GetDatasetId() string {\n\tif m != nil && m.DatasetId != nil {\n\t\treturn *m.DatasetId\n\t}\n\treturn \"\"\n}\n\nfunc (m *PartitionId) GetNamespace() string {\n\tif m != nil && m.Namespace != nil 
{\n\t\treturn *m.Namespace\n\t}\n\treturn \"\"\n}\n\n// A unique identifier for an entity.\n// If a key's partition id or any of its path kinds or names are\n// reserved/read-only, the key is reserved/read-only.\n// A reserved/read-only key is forbidden in certain documented contexts.\ntype Key struct {\n\t// Entities are partitioned into subsets, currently identified by a dataset\n\t// (usually implicitly specified by the project) and namespace ID.\n\t// Queries are scoped to a single partition.\n\tPartitionId *PartitionId `protobuf:\"bytes,1,opt,name=partition_id\" json:\"partition_id,omitempty\"`\n\t// The entity path.\n\t// An entity path consists of one or more elements composed of a kind and a\n\t// string or numerical identifier, which identify entities. The first\n\t// element identifies a <em>root entity</em>, the second element identifies\n\t// a <em>child</em> of the root entity, the third element a child of the\n\t// second entity, and so forth. The entities identified by all prefixes of\n\t// the path are called the element's <em>ancestors</em>.\n\t// An entity path is always fully complete: ALL of the entity's ancestors\n\t// are required to be in the path along with the entity identifier itself.\n\t// The only exception is that in some documented cases, the identifier in the\n\t// last path element (for the entity) itself may be omitted. 
A path can never\n\t// be empty.\n\tPathElement      []*Key_PathElement `protobuf:\"bytes,2,rep,name=path_element\" json:\"path_element,omitempty\"`\n\tXXX_unrecognized []byte             `json:\"-\"`\n}\n\nfunc (m *Key) Reset()         { *m = Key{} }\nfunc (m *Key) String() string { return proto.CompactTextString(m) }\nfunc (*Key) ProtoMessage()    {}\n\nfunc (m *Key) GetPartitionId() *PartitionId {\n\tif m != nil {\n\t\treturn m.PartitionId\n\t}\n\treturn nil\n}\n\nfunc (m *Key) GetPathElement() []*Key_PathElement {\n\tif m != nil {\n\t\treturn m.PathElement\n\t}\n\treturn nil\n}\n\n// A (kind, ID/name) pair used to construct a key path.\n//\n// At most one of name or ID may be set.\n// If either is set, the element is complete.\n// If neither is set, the element is incomplete.\ntype Key_PathElement struct {\n\t// The kind of the entity.\n\t// A kind matching regex \"__.*__\" is reserved/read-only.\n\t// A kind must not contain more than 500 characters.\n\t// Cannot be \"\".\n\tKind *string `protobuf:\"bytes,1,req,name=kind\" json:\"kind,omitempty\"`\n\t// The ID of the entity.\n\t// Never equal to zero. 
Values less than zero are discouraged and will not\n\t// be supported in the future.\n\tId *int64 `protobuf:\"varint,2,opt,name=id\" json:\"id,omitempty\"`\n\t// The name of the entity.\n\t// A name matching regex \"__.*__\" is reserved/read-only.\n\t// A name must not be more than 500 characters.\n\t// Cannot be \"\".\n\tName             *string `protobuf:\"bytes,3,opt,name=name\" json:\"name,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *Key_PathElement) Reset()         { *m = Key_PathElement{} }\nfunc (m *Key_PathElement) String() string { return proto.CompactTextString(m) }\nfunc (*Key_PathElement) ProtoMessage()    {}\n\nfunc (m *Key_PathElement) GetKind() string {\n\tif m != nil && m.Kind != nil {\n\t\treturn *m.Kind\n\t}\n\treturn \"\"\n}\n\nfunc (m *Key_PathElement) GetId() int64 {\n\tif m != nil && m.Id != nil {\n\t\treturn *m.Id\n\t}\n\treturn 0\n}\n\nfunc (m *Key_PathElement) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\n// A message that can hold any of the supported value types and associated\n// metadata.\n//\n// At most one of the <type>Value fields may be set.\n// If none are set the value is \"null\".\n//\ntype Value struct {\n\t// A boolean value.\n\tBooleanValue *bool `protobuf:\"varint,1,opt,name=boolean_value\" json:\"boolean_value,omitempty\"`\n\t// An integer value.\n\tIntegerValue *int64 `protobuf:\"varint,2,opt,name=integer_value\" json:\"integer_value,omitempty\"`\n\t// A double value.\n\tDoubleValue *float64 `protobuf:\"fixed64,3,opt,name=double_value\" json:\"double_value,omitempty\"`\n\t// A timestamp value.\n\tTimestampMicrosecondsValue *int64 `protobuf:\"varint,4,opt,name=timestamp_microseconds_value\" json:\"timestamp_microseconds_value,omitempty\"`\n\t// A key value.\n\tKeyValue *Key `protobuf:\"bytes,5,opt,name=key_value\" json:\"key_value,omitempty\"`\n\t// A blob key value.\n\tBlobKeyValue *string `protobuf:\"bytes,16,opt,name=blob_key_value\" 
json:\"blob_key_value,omitempty\"`\n\t// A UTF-8 encoded string value.\n\tStringValue *string `protobuf:\"bytes,17,opt,name=string_value\" json:\"string_value,omitempty\"`\n\t// A blob value.\n\tBlobValue []byte `protobuf:\"bytes,18,opt,name=blob_value\" json:\"blob_value,omitempty\"`\n\t// An entity value.\n\t// May have no key.\n\t// May have a key with an incomplete key path.\n\t// May have a reserved/read-only key.\n\tEntityValue *Entity `protobuf:\"bytes,6,opt,name=entity_value\" json:\"entity_value,omitempty\"`\n\t// A list value.\n\t// Cannot contain another list value.\n\t// Cannot also have a meaning and indexing set.\n\tListValue []*Value `protobuf:\"bytes,7,rep,name=list_value\" json:\"list_value,omitempty\"`\n\t// The <code>meaning</code> field is reserved and should not be used.\n\tMeaning *int32 `protobuf:\"varint,14,opt,name=meaning\" json:\"meaning,omitempty\"`\n\t// If the value should be indexed.\n\t//\n\t// The <code>indexed</code> property may be set for a\n\t// <code>null</code> value.\n\t// When <code>indexed</code> is <code>true</code>, <code>stringValue</code>\n\t// is limited to 500 characters and the blob value is limited to 500 bytes.\n\t// Exception: If meaning is set to 2, string_value is limited to 2038\n\t// characters regardless of indexed.\n\t// When indexed is true, meaning 15 and 22 are not allowed, and meaning 16\n\t// will be ignored on input (and will never be set on output).\n\t// Input values by default have <code>indexed</code> set to\n\t// <code>true</code>; however, you can explicitly set <code>indexed</code> to\n\t// <code>true</code> if you want. (An output value never has\n\t// <code>indexed</code> explicitly set to <code>true</code>.) 
If a value is\n\t// itself an entity, it cannot have <code>indexed</code> set to\n\t// <code>true</code>.\n\t// Exception: An entity value with meaning 9, 20 or 21 may be indexed.\n\tIndexed          *bool  `protobuf:\"varint,15,opt,name=indexed,def=1\" json:\"indexed,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Value) Reset()         { *m = Value{} }\nfunc (m *Value) String() string { return proto.CompactTextString(m) }\nfunc (*Value) ProtoMessage()    {}\n\nconst Default_Value_Indexed bool = true\n\nfunc (m *Value) GetBooleanValue() bool {\n\tif m != nil && m.BooleanValue != nil {\n\t\treturn *m.BooleanValue\n\t}\n\treturn false\n}\n\nfunc (m *Value) GetIntegerValue() int64 {\n\tif m != nil && m.IntegerValue != nil {\n\t\treturn *m.IntegerValue\n\t}\n\treturn 0\n}\n\nfunc (m *Value) GetDoubleValue() float64 {\n\tif m != nil && m.DoubleValue != nil {\n\t\treturn *m.DoubleValue\n\t}\n\treturn 0\n}\n\nfunc (m *Value) GetTimestampMicrosecondsValue() int64 {\n\tif m != nil && m.TimestampMicrosecondsValue != nil {\n\t\treturn *m.TimestampMicrosecondsValue\n\t}\n\treturn 0\n}\n\nfunc (m *Value) GetKeyValue() *Key {\n\tif m != nil {\n\t\treturn m.KeyValue\n\t}\n\treturn nil\n}\n\nfunc (m *Value) GetBlobKeyValue() string {\n\tif m != nil && m.BlobKeyValue != nil {\n\t\treturn *m.BlobKeyValue\n\t}\n\treturn \"\"\n}\n\nfunc (m *Value) GetStringValue() string {\n\tif m != nil && m.StringValue != nil {\n\t\treturn *m.StringValue\n\t}\n\treturn \"\"\n}\n\nfunc (m *Value) GetBlobValue() []byte {\n\tif m != nil {\n\t\treturn m.BlobValue\n\t}\n\treturn nil\n}\n\nfunc (m *Value) GetEntityValue() *Entity {\n\tif m != nil {\n\t\treturn m.EntityValue\n\t}\n\treturn nil\n}\n\nfunc (m *Value) GetListValue() []*Value {\n\tif m != nil {\n\t\treturn m.ListValue\n\t}\n\treturn nil\n}\n\nfunc (m *Value) GetMeaning() int32 {\n\tif m != nil && m.Meaning != nil {\n\t\treturn *m.Meaning\n\t}\n\treturn 0\n}\n\nfunc (m *Value) GetIndexed() bool {\n\tif m != nil && m.Indexed 
!= nil {\n\t\treturn *m.Indexed\n\t}\n\treturn Default_Value_Indexed\n}\n\n// An entity property.\ntype Property struct {\n\t// The name of the property.\n\t// A property name matching regex \"__.*__\" is reserved.\n\t// A reserved property name is forbidden in certain documented contexts.\n\t// The name must not contain more than 500 characters.\n\t// Cannot be \"\".\n\tName *string `protobuf:\"bytes,1,req,name=name\" json:\"name,omitempty\"`\n\t// The value(s) of the property.\n\t// Each value can have only one value property populated. For example,\n\t// you cannot have a values list of <code>{ value: { integerValue: 22,\n\t// stringValue: \"a\" } }</code>, but you can have <code>{ value: { listValue:\n\t// [ { integerValue: 22 }, { stringValue: \"a\" } ] }</code>.\n\tValue            *Value `protobuf:\"bytes,4,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Property) Reset()         { *m = Property{} }\nfunc (m *Property) String() string { return proto.CompactTextString(m) }\nfunc (*Property) ProtoMessage()    {}\n\nfunc (m *Property) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *Property) GetValue() *Value {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\n// An entity.\n//\n// An entity is limited to 1 megabyte when stored. 
That <em>roughly</em>\n// corresponds to a limit of 1 megabyte for the serialized form of this\n// message.\ntype Entity struct {\n\t// The entity's key.\n\t//\n\t// An entity must have a key, unless otherwise documented (for example,\n\t// an entity in <code>Value.entityValue</code> may have no key).\n\t// An entity's kind is its key's path's last element's kind,\n\t// or null if it has no key.\n\tKey *Key `protobuf:\"bytes,1,opt,name=key\" json:\"key,omitempty\"`\n\t// The entity's properties.\n\t// Each property's name must be unique for its entity.\n\tProperty         []*Property `protobuf:\"bytes,2,rep,name=property\" json:\"property,omitempty\"`\n\tXXX_unrecognized []byte      `json:\"-\"`\n}\n\nfunc (m *Entity) Reset()         { *m = Entity{} }\nfunc (m *Entity) String() string { return proto.CompactTextString(m) }\nfunc (*Entity) ProtoMessage()    {}\n\nfunc (m *Entity) GetKey() *Key {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc (m *Entity) GetProperty() []*Property {\n\tif m != nil {\n\t\treturn m.Property\n\t}\n\treturn nil\n}\n\n// The result of fetching an entity from the datastore.\ntype EntityResult struct {\n\t// The resulting entity.\n\tEntity           *Entity `protobuf:\"bytes,1,req,name=entity\" json:\"entity,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *EntityResult) Reset()         { *m = EntityResult{} }\nfunc (m *EntityResult) String() string { return proto.CompactTextString(m) }\nfunc (*EntityResult) ProtoMessage()    {}\n\nfunc (m *EntityResult) GetEntity() *Entity {\n\tif m != nil {\n\t\treturn m.Entity\n\t}\n\treturn nil\n}\n\n// A query.\ntype Query struct {\n\t// The projection to return. 
If not set the entire entity is returned.\n\tProjection []*PropertyExpression `protobuf:\"bytes,2,rep,name=projection\" json:\"projection,omitempty\"`\n\t// The kinds to query (if empty, returns entities from all kinds).\n\tKind []*KindExpression `protobuf:\"bytes,3,rep,name=kind\" json:\"kind,omitempty\"`\n\t// The filter to apply (optional).\n\tFilter *Filter `protobuf:\"bytes,4,opt,name=filter\" json:\"filter,omitempty\"`\n\t// The order to apply to the query results (if empty, order is unspecified).\n\tOrder []*PropertyOrder `protobuf:\"bytes,5,rep,name=order\" json:\"order,omitempty\"`\n\t// The properties to group by (if empty, no grouping is applied to the\n\t// result set).\n\tGroupBy []*PropertyReference `protobuf:\"bytes,6,rep,name=group_by\" json:\"group_by,omitempty\"`\n\t// A starting point for the query results. Optional. Query cursors are\n\t// returned in query result batches.\n\tStartCursor []byte `protobuf:\"bytes,7,opt,name=start_cursor\" json:\"start_cursor,omitempty\"`\n\t// An ending point for the query results. Optional. Query cursors are\n\t// returned in query result batches.\n\tEndCursor []byte `protobuf:\"bytes,8,opt,name=end_cursor\" json:\"end_cursor,omitempty\"`\n\t// The number of results to skip. Applies before limit, but after all other\n\t// constraints (optional, defaults to 0).\n\tOffset *int32 `protobuf:\"varint,10,opt,name=offset,def=0\" json:\"offset,omitempty\"`\n\t// The maximum number of results to return. Applies after all other\n\t// constraints. 
Optional.\n\tLimit            *int32 `protobuf:\"varint,11,opt,name=limit\" json:\"limit,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Query) Reset()         { *m = Query{} }\nfunc (m *Query) String() string { return proto.CompactTextString(m) }\nfunc (*Query) ProtoMessage()    {}\n\nconst Default_Query_Offset int32 = 0\n\nfunc (m *Query) GetProjection() []*PropertyExpression {\n\tif m != nil {\n\t\treturn m.Projection\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetKind() []*KindExpression {\n\tif m != nil {\n\t\treturn m.Kind\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetFilter() *Filter {\n\tif m != nil {\n\t\treturn m.Filter\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetOrder() []*PropertyOrder {\n\tif m != nil {\n\t\treturn m.Order\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetGroupBy() []*PropertyReference {\n\tif m != nil {\n\t\treturn m.GroupBy\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetStartCursor() []byte {\n\tif m != nil {\n\t\treturn m.StartCursor\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetEndCursor() []byte {\n\tif m != nil {\n\t\treturn m.EndCursor\n\t}\n\treturn nil\n}\n\nfunc (m *Query) GetOffset() int32 {\n\tif m != nil && m.Offset != nil {\n\t\treturn *m.Offset\n\t}\n\treturn Default_Query_Offset\n}\n\nfunc (m *Query) GetLimit() int32 {\n\tif m != nil && m.Limit != nil {\n\t\treturn *m.Limit\n\t}\n\treturn 0\n}\n\n// A representation of a kind.\ntype KindExpression struct {\n\t// The name of the kind.\n\tName             *string `protobuf:\"bytes,1,req,name=name\" json:\"name,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *KindExpression) Reset()         { *m = KindExpression{} }\nfunc (m *KindExpression) String() string { return proto.CompactTextString(m) }\nfunc (*KindExpression) ProtoMessage()    {}\n\nfunc (m *KindExpression) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\n// A reference to a property relative to the kind expressions.\n// exactly.\ntype 
PropertyReference struct {\n\t// The name of the property.\n\tName             *string `protobuf:\"bytes,2,req,name=name\" json:\"name,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *PropertyReference) Reset()         { *m = PropertyReference{} }\nfunc (m *PropertyReference) String() string { return proto.CompactTextString(m) }\nfunc (*PropertyReference) ProtoMessage()    {}\n\nfunc (m *PropertyReference) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\n// A representation of a property in a projection.\ntype PropertyExpression struct {\n\t// The property to project.\n\tProperty *PropertyReference `protobuf:\"bytes,1,req,name=property\" json:\"property,omitempty\"`\n\t// The aggregation function to apply to the property. Optional.\n\t// Can only be used when grouping by at least one property. Must\n\t// then be set on all properties in the projection that are not\n\t// being grouped by.\n\tAggregationFunction *PropertyExpression_AggregationFunction `protobuf:\"varint,2,opt,name=aggregation_function,enum=datastore.PropertyExpression_AggregationFunction\" json:\"aggregation_function,omitempty\"`\n\tXXX_unrecognized    []byte                                  `json:\"-\"`\n}\n\nfunc (m *PropertyExpression) Reset()         { *m = PropertyExpression{} }\nfunc (m *PropertyExpression) String() string { return proto.CompactTextString(m) }\nfunc (*PropertyExpression) ProtoMessage()    {}\n\nfunc (m *PropertyExpression) GetProperty() *PropertyReference {\n\tif m != nil {\n\t\treturn m.Property\n\t}\n\treturn nil\n}\n\nfunc (m *PropertyExpression) GetAggregationFunction() PropertyExpression_AggregationFunction {\n\tif m != nil && m.AggregationFunction != nil {\n\t\treturn *m.AggregationFunction\n\t}\n\treturn PropertyExpression_FIRST\n}\n\n// The desired order for a specific property.\ntype PropertyOrder struct {\n\t// The property to order by.\n\tProperty *PropertyReference 
`protobuf:\"bytes,1,req,name=property\" json:\"property,omitempty\"`\n\t// The direction to order by.\n\tDirection        *PropertyOrder_Direction `protobuf:\"varint,2,opt,name=direction,enum=datastore.PropertyOrder_Direction,def=1\" json:\"direction,omitempty\"`\n\tXXX_unrecognized []byte                   `json:\"-\"`\n}\n\nfunc (m *PropertyOrder) Reset()         { *m = PropertyOrder{} }\nfunc (m *PropertyOrder) String() string { return proto.CompactTextString(m) }\nfunc (*PropertyOrder) ProtoMessage()    {}\n\nconst Default_PropertyOrder_Direction PropertyOrder_Direction = PropertyOrder_ASCENDING\n\nfunc (m *PropertyOrder) GetProperty() *PropertyReference {\n\tif m != nil {\n\t\treturn m.Property\n\t}\n\treturn nil\n}\n\nfunc (m *PropertyOrder) GetDirection() PropertyOrder_Direction {\n\tif m != nil && m.Direction != nil {\n\t\treturn *m.Direction\n\t}\n\treturn Default_PropertyOrder_Direction\n}\n\n// A holder for any type of filter. Exactly one field should be specified.\ntype Filter struct {\n\t// A composite filter.\n\tCompositeFilter *CompositeFilter `protobuf:\"bytes,1,opt,name=composite_filter\" json:\"composite_filter,omitempty\"`\n\t// A filter on a property.\n\tPropertyFilter   *PropertyFilter `protobuf:\"bytes,2,opt,name=property_filter\" json:\"property_filter,omitempty\"`\n\tXXX_unrecognized []byte          `json:\"-\"`\n}\n\nfunc (m *Filter) Reset()         { *m = Filter{} }\nfunc (m *Filter) String() string { return proto.CompactTextString(m) }\nfunc (*Filter) ProtoMessage()    {}\n\nfunc (m *Filter) GetCompositeFilter() *CompositeFilter {\n\tif m != nil {\n\t\treturn m.CompositeFilter\n\t}\n\treturn nil\n}\n\nfunc (m *Filter) GetPropertyFilter() *PropertyFilter {\n\tif m != nil {\n\t\treturn m.PropertyFilter\n\t}\n\treturn nil\n}\n\n// A filter that merges the multiple other filters using the given operation.\ntype CompositeFilter struct {\n\t// The operator for combining multiple filters.\n\tOperator *CompositeFilter_Operator 
`protobuf:\"varint,1,req,name=operator,enum=datastore.CompositeFilter_Operator\" json:\"operator,omitempty\"`\n\t// The list of filters to combine.\n\t// Must contain at least one filter.\n\tFilter           []*Filter `protobuf:\"bytes,2,rep,name=filter\" json:\"filter,omitempty\"`\n\tXXX_unrecognized []byte    `json:\"-\"`\n}\n\nfunc (m *CompositeFilter) Reset()         { *m = CompositeFilter{} }\nfunc (m *CompositeFilter) String() string { return proto.CompactTextString(m) }\nfunc (*CompositeFilter) ProtoMessage()    {}\n\nfunc (m *CompositeFilter) GetOperator() CompositeFilter_Operator {\n\tif m != nil && m.Operator != nil {\n\t\treturn *m.Operator\n\t}\n\treturn CompositeFilter_AND\n}\n\nfunc (m *CompositeFilter) GetFilter() []*Filter {\n\tif m != nil {\n\t\treturn m.Filter\n\t}\n\treturn nil\n}\n\n// A filter on a specific property.\ntype PropertyFilter struct {\n\t// The property to filter by.\n\tProperty *PropertyReference `protobuf:\"bytes,1,req,name=property\" json:\"property,omitempty\"`\n\t// The operator to filter by.\n\tOperator *PropertyFilter_Operator `protobuf:\"varint,2,req,name=operator,enum=datastore.PropertyFilter_Operator\" json:\"operator,omitempty\"`\n\t// The value to compare the property to.\n\tValue            *Value `protobuf:\"bytes,3,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *PropertyFilter) Reset()         { *m = PropertyFilter{} }\nfunc (m *PropertyFilter) String() string { return proto.CompactTextString(m) }\nfunc (*PropertyFilter) ProtoMessage()    {}\n\nfunc (m *PropertyFilter) GetProperty() *PropertyReference {\n\tif m != nil {\n\t\treturn m.Property\n\t}\n\treturn nil\n}\n\nfunc (m *PropertyFilter) GetOperator() PropertyFilter_Operator {\n\tif m != nil && m.Operator != nil {\n\t\treturn *m.Operator\n\t}\n\treturn PropertyFilter_LESS_THAN\n}\n\nfunc (m *PropertyFilter) GetValue() *Value {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\n// A GQL query.\ntype 
GqlQuery struct {\n\tQueryString *string `protobuf:\"bytes,1,req,name=query_string\" json:\"query_string,omitempty\"`\n\t// When false, the query string must not contain a literal.\n\tAllowLiteral *bool `protobuf:\"varint,2,opt,name=allow_literal,def=0\" json:\"allow_literal,omitempty\"`\n\t// A named argument must set field GqlQueryArg.name.\n\t// No two named arguments may have the same name.\n\t// For each non-reserved named binding site in the query string,\n\t// there must be a named argument with that name,\n\t// but not necessarily the inverse.\n\tNameArg []*GqlQueryArg `protobuf:\"bytes,3,rep,name=name_arg\" json:\"name_arg,omitempty\"`\n\t// Numbered binding site @1 references the first numbered argument,\n\t// effectively using 1-based indexing, rather than the usual 0.\n\t// A numbered argument must NOT set field GqlQueryArg.name.\n\t// For each binding site numbered i in query_string,\n\t// there must be an ith numbered argument.\n\t// The inverse must also be true.\n\tNumberArg        []*GqlQueryArg `protobuf:\"bytes,4,rep,name=number_arg\" json:\"number_arg,omitempty\"`\n\tXXX_unrecognized []byte         `json:\"-\"`\n}\n\nfunc (m *GqlQuery) Reset()         { *m = GqlQuery{} }\nfunc (m *GqlQuery) String() string { return proto.CompactTextString(m) }\nfunc (*GqlQuery) ProtoMessage()    {}\n\nconst Default_GqlQuery_AllowLiteral bool = false\n\nfunc (m *GqlQuery) GetQueryString() string {\n\tif m != nil && m.QueryString != nil {\n\t\treturn *m.QueryString\n\t}\n\treturn \"\"\n}\n\nfunc (m *GqlQuery) GetAllowLiteral() bool {\n\tif m != nil && m.AllowLiteral != nil {\n\t\treturn *m.AllowLiteral\n\t}\n\treturn Default_GqlQuery_AllowLiteral\n}\n\nfunc (m *GqlQuery) GetNameArg() []*GqlQueryArg {\n\tif m != nil {\n\t\treturn m.NameArg\n\t}\n\treturn nil\n}\n\nfunc (m *GqlQuery) GetNumberArg() []*GqlQueryArg {\n\tif m != nil {\n\t\treturn m.NumberArg\n\t}\n\treturn nil\n}\n\n// A binding argument for a GQL query.\n// Exactly one of fields value and cursor must 
be set.\ntype GqlQueryArg struct {\n\t// Must match regex \"[A-Za-z_$][A-Za-z_$0-9]*\".\n\t// Must not match regex \"__.*__\".\n\t// Must not be \"\".\n\tName             *string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\tValue            *Value  `protobuf:\"bytes,2,opt,name=value\" json:\"value,omitempty\"`\n\tCursor           []byte  `protobuf:\"bytes,3,opt,name=cursor\" json:\"cursor,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *GqlQueryArg) Reset()         { *m = GqlQueryArg{} }\nfunc (m *GqlQueryArg) String() string { return proto.CompactTextString(m) }\nfunc (*GqlQueryArg) ProtoMessage()    {}\n\nfunc (m *GqlQueryArg) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *GqlQueryArg) GetValue() *Value {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\nfunc (m *GqlQueryArg) GetCursor() []byte {\n\tif m != nil {\n\t\treturn m.Cursor\n\t}\n\treturn nil\n}\n\n// A batch of results produced by a query.\ntype QueryResultBatch struct {\n\t// The result type for every entity in entityResults.\n\tEntityResultType *EntityResult_ResultType `protobuf:\"varint,1,req,name=entity_result_type,enum=datastore.EntityResult_ResultType\" json:\"entity_result_type,omitempty\"`\n\t// The results for this batch.\n\tEntityResult []*EntityResult `protobuf:\"bytes,2,rep,name=entity_result\" json:\"entity_result,omitempty\"`\n\t// A cursor that points to the position after the last result in the batch.\n\t// May be absent.\n\tEndCursor []byte `protobuf:\"bytes,4,opt,name=end_cursor\" json:\"end_cursor,omitempty\"`\n\t// The state of the query after the current batch.\n\tMoreResults *QueryResultBatch_MoreResultsType `protobuf:\"varint,5,req,name=more_results,enum=datastore.QueryResultBatch_MoreResultsType\" json:\"more_results,omitempty\"`\n\t// The number of results skipped because of <code>Query.offset</code>.\n\tSkippedResults   *int32 
`protobuf:\"varint,6,opt,name=skipped_results\" json:\"skipped_results,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *QueryResultBatch) Reset()         { *m = QueryResultBatch{} }\nfunc (m *QueryResultBatch) String() string { return proto.CompactTextString(m) }\nfunc (*QueryResultBatch) ProtoMessage()    {}\n\nfunc (m *QueryResultBatch) GetEntityResultType() EntityResult_ResultType {\n\tif m != nil && m.EntityResultType != nil {\n\t\treturn *m.EntityResultType\n\t}\n\treturn EntityResult_FULL\n}\n\nfunc (m *QueryResultBatch) GetEntityResult() []*EntityResult {\n\tif m != nil {\n\t\treturn m.EntityResult\n\t}\n\treturn nil\n}\n\nfunc (m *QueryResultBatch) GetEndCursor() []byte {\n\tif m != nil {\n\t\treturn m.EndCursor\n\t}\n\treturn nil\n}\n\nfunc (m *QueryResultBatch) GetMoreResults() QueryResultBatch_MoreResultsType {\n\tif m != nil && m.MoreResults != nil {\n\t\treturn *m.MoreResults\n\t}\n\treturn QueryResultBatch_NOT_FINISHED\n}\n\nfunc (m *QueryResultBatch) GetSkippedResults() int32 {\n\tif m != nil && m.SkippedResults != nil {\n\t\treturn *m.SkippedResults\n\t}\n\treturn 0\n}\n\n// A set of changes to apply.\n//\n// No entity in this message may have a reserved property name,\n// not even a property in an entity in a value.\n// No value in this message may have meaning 18,\n// not even a value in an entity in another value.\n//\n// If entities with duplicate keys are present, an arbitrary choice will\n// be made as to which is written.\ntype Mutation struct {\n\t// Entities to upsert.\n\t// Each upserted entity's key must have a complete path and\n\t// must not be reserved/read-only.\n\tUpsert []*Entity `protobuf:\"bytes,1,rep,name=upsert\" json:\"upsert,omitempty\"`\n\t// Entities to update.\n\t// Each updated entity's key must have a complete path and\n\t// must not be reserved/read-only.\n\tUpdate []*Entity `protobuf:\"bytes,2,rep,name=update\" json:\"update,omitempty\"`\n\t// Entities to insert.\n\t// Each inserted entity's key must 
have a complete path and\n\t// must not be reserved/read-only.\n\tInsert []*Entity `protobuf:\"bytes,3,rep,name=insert\" json:\"insert,omitempty\"`\n\t// Insert entities with a newly allocated ID.\n\t// Each inserted entity's key must omit the final identifier in its path and\n\t// must not be reserved/read-only.\n\tInsertAutoId []*Entity `protobuf:\"bytes,4,rep,name=insert_auto_id\" json:\"insert_auto_id,omitempty\"`\n\t// Keys of entities to delete.\n\t// Each key must have a complete key path and must not be reserved/read-only.\n\tDelete []*Key `protobuf:\"bytes,5,rep,name=delete\" json:\"delete,omitempty\"`\n\t// Ignore a user specified read-only period. Optional.\n\tForce            *bool  `protobuf:\"varint,6,opt,name=force\" json:\"force,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Mutation) Reset()         { *m = Mutation{} }\nfunc (m *Mutation) String() string { return proto.CompactTextString(m) }\nfunc (*Mutation) ProtoMessage()    {}\n\nfunc (m *Mutation) GetUpsert() []*Entity {\n\tif m != nil {\n\t\treturn m.Upsert\n\t}\n\treturn nil\n}\n\nfunc (m *Mutation) GetUpdate() []*Entity {\n\tif m != nil {\n\t\treturn m.Update\n\t}\n\treturn nil\n}\n\nfunc (m *Mutation) GetInsert() []*Entity {\n\tif m != nil {\n\t\treturn m.Insert\n\t}\n\treturn nil\n}\n\nfunc (m *Mutation) GetInsertAutoId() []*Entity {\n\tif m != nil {\n\t\treturn m.InsertAutoId\n\t}\n\treturn nil\n}\n\nfunc (m *Mutation) GetDelete() []*Key {\n\tif m != nil {\n\t\treturn m.Delete\n\t}\n\treturn nil\n}\n\nfunc (m *Mutation) GetForce() bool {\n\tif m != nil && m.Force != nil {\n\t\treturn *m.Force\n\t}\n\treturn false\n}\n\n// The result of applying a mutation.\ntype MutationResult struct {\n\t// Number of index writes.\n\tIndexUpdates *int32 `protobuf:\"varint,1,req,name=index_updates\" json:\"index_updates,omitempty\"`\n\t// Keys for <code>insertAutoId</code> entities. 
One per entity from the\n\t// request, in the same order.\n\tInsertAutoIdKey  []*Key `protobuf:\"bytes,2,rep,name=insert_auto_id_key\" json:\"insert_auto_id_key,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *MutationResult) Reset()         { *m = MutationResult{} }\nfunc (m *MutationResult) String() string { return proto.CompactTextString(m) }\nfunc (*MutationResult) ProtoMessage()    {}\n\nfunc (m *MutationResult) GetIndexUpdates() int32 {\n\tif m != nil && m.IndexUpdates != nil {\n\t\treturn *m.IndexUpdates\n\t}\n\treturn 0\n}\n\nfunc (m *MutationResult) GetInsertAutoIdKey() []*Key {\n\tif m != nil {\n\t\treturn m.InsertAutoIdKey\n\t}\n\treturn nil\n}\n\n// Options shared by read requests.\ntype ReadOptions struct {\n\t// The read consistency to use.\n\t// Cannot be set when transaction is set.\n\t// Lookup and ancestor queries default to STRONG, global queries default to\n\t// EVENTUAL and cannot be set to STRONG.\n\tReadConsistency *ReadOptions_ReadConsistency `protobuf:\"varint,1,opt,name=read_consistency,enum=datastore.ReadOptions_ReadConsistency,def=0\" json:\"read_consistency,omitempty\"`\n\t// The transaction to use. 
Optional.\n\tTransaction      []byte `protobuf:\"bytes,2,opt,name=transaction\" json:\"transaction,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *ReadOptions) Reset()         { *m = ReadOptions{} }\nfunc (m *ReadOptions) String() string { return proto.CompactTextString(m) }\nfunc (*ReadOptions) ProtoMessage()    {}\n\nconst Default_ReadOptions_ReadConsistency ReadOptions_ReadConsistency = ReadOptions_DEFAULT\n\nfunc (m *ReadOptions) GetReadConsistency() ReadOptions_ReadConsistency {\n\tif m != nil && m.ReadConsistency != nil {\n\t\treturn *m.ReadConsistency\n\t}\n\treturn Default_ReadOptions_ReadConsistency\n}\n\nfunc (m *ReadOptions) GetTransaction() []byte {\n\tif m != nil {\n\t\treturn m.Transaction\n\t}\n\treturn nil\n}\n\n// The request for Lookup.\ntype LookupRequest struct {\n\t// Options for this lookup request. Optional.\n\tReadOptions *ReadOptions `protobuf:\"bytes,1,opt,name=read_options\" json:\"read_options,omitempty\"`\n\t// Keys of entities to look up from the datastore.\n\tKey              []*Key `protobuf:\"bytes,3,rep,name=key\" json:\"key,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *LookupRequest) Reset()         { *m = LookupRequest{} }\nfunc (m *LookupRequest) String() string { return proto.CompactTextString(m) }\nfunc (*LookupRequest) ProtoMessage()    {}\n\nfunc (m *LookupRequest) GetReadOptions() *ReadOptions {\n\tif m != nil {\n\t\treturn m.ReadOptions\n\t}\n\treturn nil\n}\n\nfunc (m *LookupRequest) GetKey() []*Key {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\n// The response for Lookup.\ntype LookupResponse struct {\n\t// Entities found as ResultType.FULL entities.\n\tFound []*EntityResult `protobuf:\"bytes,1,rep,name=found\" json:\"found,omitempty\"`\n\t// Entities not found as ResultType.KEY_ONLY entities.\n\tMissing []*EntityResult `protobuf:\"bytes,2,rep,name=missing\" json:\"missing,omitempty\"`\n\t// A list of keys that were not looked up due to resource 
constraints.\n\tDeferred         []*Key `protobuf:\"bytes,3,rep,name=deferred\" json:\"deferred,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *LookupResponse) Reset()         { *m = LookupResponse{} }\nfunc (m *LookupResponse) String() string { return proto.CompactTextString(m) }\nfunc (*LookupResponse) ProtoMessage()    {}\n\nfunc (m *LookupResponse) GetFound() []*EntityResult {\n\tif m != nil {\n\t\treturn m.Found\n\t}\n\treturn nil\n}\n\nfunc (m *LookupResponse) GetMissing() []*EntityResult {\n\tif m != nil {\n\t\treturn m.Missing\n\t}\n\treturn nil\n}\n\nfunc (m *LookupResponse) GetDeferred() []*Key {\n\tif m != nil {\n\t\treturn m.Deferred\n\t}\n\treturn nil\n}\n\n// The request for RunQuery.\ntype RunQueryRequest struct {\n\t// The options for this query.\n\tReadOptions *ReadOptions `protobuf:\"bytes,1,opt,name=read_options\" json:\"read_options,omitempty\"`\n\t// Entities are partitioned into subsets, identified by a dataset (usually\n\t// implicitly specified by the project) and namespace ID. 
Queries are scoped\n\t// to a single partition.\n\t// This partition ID is normalized with the standard default context\n\t// partition ID, but all other partition IDs in RunQueryRequest are\n\t// normalized with this partition ID as the context partition ID.\n\tPartitionId *PartitionId `protobuf:\"bytes,2,opt,name=partition_id\" json:\"partition_id,omitempty\"`\n\t// The query to run.\n\t// Either this field or field gql_query must be set, but not both.\n\tQuery *Query `protobuf:\"bytes,3,opt,name=query\" json:\"query,omitempty\"`\n\t// The GQL query to run.\n\t// Either this field or field query must be set, but not both.\n\tGqlQuery         *GqlQuery `protobuf:\"bytes,7,opt,name=gql_query\" json:\"gql_query,omitempty\"`\n\tXXX_unrecognized []byte    `json:\"-\"`\n}\n\nfunc (m *RunQueryRequest) Reset()         { *m = RunQueryRequest{} }\nfunc (m *RunQueryRequest) String() string { return proto.CompactTextString(m) }\nfunc (*RunQueryRequest) ProtoMessage()    {}\n\nfunc (m *RunQueryRequest) GetReadOptions() *ReadOptions {\n\tif m != nil {\n\t\treturn m.ReadOptions\n\t}\n\treturn nil\n}\n\nfunc (m *RunQueryRequest) GetPartitionId() *PartitionId {\n\tif m != nil {\n\t\treturn m.PartitionId\n\t}\n\treturn nil\n}\n\nfunc (m *RunQueryRequest) GetQuery() *Query {\n\tif m != nil {\n\t\treturn m.Query\n\t}\n\treturn nil\n}\n\nfunc (m *RunQueryRequest) GetGqlQuery() *GqlQuery {\n\tif m != nil {\n\t\treturn m.GqlQuery\n\t}\n\treturn nil\n}\n\n// The response for RunQuery.\ntype RunQueryResponse struct {\n\t// A batch of query results (always present).\n\tBatch            *QueryResultBatch `protobuf:\"bytes,1,opt,name=batch\" json:\"batch,omitempty\"`\n\tXXX_unrecognized []byte            `json:\"-\"`\n}\n\nfunc (m *RunQueryResponse) Reset()         { *m = RunQueryResponse{} }\nfunc (m *RunQueryResponse) String() string { return proto.CompactTextString(m) }\nfunc (*RunQueryResponse) ProtoMessage()    {}\n\nfunc (m *RunQueryResponse) GetBatch() *QueryResultBatch {\n\tif m != 
nil {\n\t\treturn m.Batch\n\t}\n\treturn nil\n}\n\n// The request for BeginTransaction.\ntype BeginTransactionRequest struct {\n\t// The transaction isolation level.\n\tIsolationLevel   *BeginTransactionRequest_IsolationLevel `protobuf:\"varint,1,opt,name=isolation_level,enum=datastore.BeginTransactionRequest_IsolationLevel,def=0\" json:\"isolation_level,omitempty\"`\n\tXXX_unrecognized []byte                                  `json:\"-\"`\n}\n\nfunc (m *BeginTransactionRequest) Reset()         { *m = BeginTransactionRequest{} }\nfunc (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }\nfunc (*BeginTransactionRequest) ProtoMessage()    {}\n\nconst Default_BeginTransactionRequest_IsolationLevel BeginTransactionRequest_IsolationLevel = BeginTransactionRequest_SNAPSHOT\n\nfunc (m *BeginTransactionRequest) GetIsolationLevel() BeginTransactionRequest_IsolationLevel {\n\tif m != nil && m.IsolationLevel != nil {\n\t\treturn *m.IsolationLevel\n\t}\n\treturn Default_BeginTransactionRequest_IsolationLevel\n}\n\n// The response for BeginTransaction.\ntype BeginTransactionResponse struct {\n\t// The transaction identifier (always present).\n\tTransaction      []byte `protobuf:\"bytes,1,opt,name=transaction\" json:\"transaction,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *BeginTransactionResponse) Reset()         { *m = BeginTransactionResponse{} }\nfunc (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) }\nfunc (*BeginTransactionResponse) ProtoMessage()    {}\n\nfunc (m *BeginTransactionResponse) GetTransaction() []byte {\n\tif m != nil {\n\t\treturn m.Transaction\n\t}\n\treturn nil\n}\n\n// The request for Rollback.\ntype RollbackRequest struct {\n\t// The transaction identifier, returned by a call to\n\t// <code>beginTransaction</code>.\n\tTransaction      []byte `protobuf:\"bytes,1,req,name=transaction\" json:\"transaction,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m 
*RollbackRequest) Reset()         { *m = RollbackRequest{} }\nfunc (m *RollbackRequest) String() string { return proto.CompactTextString(m) }\nfunc (*RollbackRequest) ProtoMessage()    {}\n\nfunc (m *RollbackRequest) GetTransaction() []byte {\n\tif m != nil {\n\t\treturn m.Transaction\n\t}\n\treturn nil\n}\n\n// The response for Rollback.\ntype RollbackResponse struct {\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *RollbackResponse) Reset()         { *m = RollbackResponse{} }\nfunc (m *RollbackResponse) String() string { return proto.CompactTextString(m) }\nfunc (*RollbackResponse) ProtoMessage()    {}\n\n// The request for Commit.\ntype CommitRequest struct {\n\t// The transaction identifier, returned by a call to\n\t// <code>beginTransaction</code>. Must be set when mode is TRANSACTIONAL.\n\tTransaction []byte `protobuf:\"bytes,1,opt,name=transaction\" json:\"transaction,omitempty\"`\n\t// The mutation to perform. Optional.\n\tMutation *Mutation `protobuf:\"bytes,2,opt,name=mutation\" json:\"mutation,omitempty\"`\n\t// The type of commit to perform. 
Either TRANSACTIONAL or NON_TRANSACTIONAL.\n\tMode             *CommitRequest_Mode `protobuf:\"varint,5,opt,name=mode,enum=datastore.CommitRequest_Mode,def=1\" json:\"mode,omitempty\"`\n\tXXX_unrecognized []byte              `json:\"-\"`\n}\n\nfunc (m *CommitRequest) Reset()         { *m = CommitRequest{} }\nfunc (m *CommitRequest) String() string { return proto.CompactTextString(m) }\nfunc (*CommitRequest) ProtoMessage()    {}\n\nconst Default_CommitRequest_Mode CommitRequest_Mode = CommitRequest_TRANSACTIONAL\n\nfunc (m *CommitRequest) GetTransaction() []byte {\n\tif m != nil {\n\t\treturn m.Transaction\n\t}\n\treturn nil\n}\n\nfunc (m *CommitRequest) GetMutation() *Mutation {\n\tif m != nil {\n\t\treturn m.Mutation\n\t}\n\treturn nil\n}\n\nfunc (m *CommitRequest) GetMode() CommitRequest_Mode {\n\tif m != nil && m.Mode != nil {\n\t\treturn *m.Mode\n\t}\n\treturn Default_CommitRequest_Mode\n}\n\n// The response for Commit.\ntype CommitResponse struct {\n\t// The result of performing the mutation (if any).\n\tMutationResult   *MutationResult `protobuf:\"bytes,1,opt,name=mutation_result\" json:\"mutation_result,omitempty\"`\n\tXXX_unrecognized []byte          `json:\"-\"`\n}\n\nfunc (m *CommitResponse) Reset()         { *m = CommitResponse{} }\nfunc (m *CommitResponse) String() string { return proto.CompactTextString(m) }\nfunc (*CommitResponse) ProtoMessage()    {}\n\nfunc (m *CommitResponse) GetMutationResult() *MutationResult {\n\tif m != nil {\n\t\treturn m.MutationResult\n\t}\n\treturn nil\n}\n\n// The request for AllocateIds.\ntype AllocateIdsRequest struct {\n\t// A list of keys with incomplete key paths to allocate IDs for.\n\t// No key may be reserved/read-only.\n\tKey              []*Key `protobuf:\"bytes,1,rep,name=key\" json:\"key,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *AllocateIdsRequest) Reset()         { *m = AllocateIdsRequest{} }\nfunc (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }\nfunc 
(*AllocateIdsRequest) ProtoMessage()    {}\n\nfunc (m *AllocateIdsRequest) GetKey() []*Key {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\n// The response for AllocateIds.\ntype AllocateIdsResponse struct {\n\t// The keys specified in the request (in the same order), each with\n\t// its key path completed with a newly allocated ID.\n\tKey              []*Key `protobuf:\"bytes,1,rep,name=key\" json:\"key,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *AllocateIdsResponse) Reset()         { *m = AllocateIdsResponse{} }\nfunc (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }\nfunc (*AllocateIdsResponse) ProtoMessage()    {}\n\nfunc (m *AllocateIdsResponse) GetKey() []*Key {\n\tif m != nil {\n\t\treturn m.Key\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tproto.RegisterEnum(\"datastore.EntityResult_ResultType\", EntityResult_ResultType_name, EntityResult_ResultType_value)\n\tproto.RegisterEnum(\"datastore.PropertyExpression_AggregationFunction\", PropertyExpression_AggregationFunction_name, PropertyExpression_AggregationFunction_value)\n\tproto.RegisterEnum(\"datastore.PropertyOrder_Direction\", PropertyOrder_Direction_name, PropertyOrder_Direction_value)\n\tproto.RegisterEnum(\"datastore.CompositeFilter_Operator\", CompositeFilter_Operator_name, CompositeFilter_Operator_value)\n\tproto.RegisterEnum(\"datastore.PropertyFilter_Operator\", PropertyFilter_Operator_name, PropertyFilter_Operator_value)\n\tproto.RegisterEnum(\"datastore.QueryResultBatch_MoreResultsType\", QueryResultBatch_MoreResultsType_name, QueryResultBatch_MoreResultsType_value)\n\tproto.RegisterEnum(\"datastore.ReadOptions_ReadConsistency\", ReadOptions_ReadConsistency_name, ReadOptions_ReadConsistency_value)\n\tproto.RegisterEnum(\"datastore.BeginTransactionRequest_IsolationLevel\", BeginTransactionRequest_IsolationLevel_name, BeginTransactionRequest_IsolationLevel_value)\n\tproto.RegisterEnum(\"datastore.CommitRequest_Mode\", 
CommitRequest_Mode_name, CommitRequest_Mode_value)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/internal/datastore/datastore_v1.proto",
    "content": "// Copyright 2013 Google Inc. All Rights Reserved.\n//\n// The datastore v1 service proto definitions\n\nsyntax = \"proto2\";\n\npackage datastore;\noption java_package = \"com.google.api.services.datastore\";\n\n\n// An identifier for a particular subset of entities.\n//\n// Entities are partitioned into various subsets, each used by different\n// datasets and different namespaces within a dataset and so forth.\n//\n// All input partition IDs are normalized before use.\n// A partition ID is normalized as follows:\n//   If the partition ID is unset or is set to an empty partition ID, replace it\n//       with the context partition ID.\n//   Otherwise, if the partition ID has no dataset ID, assign it the context\n//       partition ID's dataset ID.\n// Unless otherwise documented, the context partition ID has the dataset ID set\n// to the context dataset ID and no other partition dimension set.\n//\n// A partition ID is empty if all of its fields are unset.\n//\n// Partition dimension:\n// A dimension may be unset.\n// A dimension's value must never be \"\".\n// A dimension's value must match [A-Za-z\\d\\.\\-_]{1,100}\n// If the value of any dimension matches regex \"__.*__\",\n// the partition is reserved/read-only.\n// A reserved/read-only partition ID is forbidden in certain documented contexts.\n//\n// Dataset ID:\n// A dataset id's value must never be \"\".\n// A dataset id's value must match\n// ([a-z\\d\\-]{1,100}~)?([a-z\\d][a-z\\d\\-\\.]{0,99}:)?([a-z\\d][a-z\\d\\-]{0,99}\nmessage PartitionId {\n  // The dataset ID.\n  optional string dataset_id = 3;\n  // The namespace.\n  optional string namespace = 4;\n}\n\n// A unique identifier for an entity.\n// If a key's partition id or any of its path kinds or names are\n// reserved/read-only, the key is reserved/read-only.\n// A reserved/read-only key is forbidden in certain documented contexts.\nmessage Key {\n  // Entities are partitioned into subsets, currently identified by a dataset\n  // 
(usually implicitly specified by the project) and namespace ID.\n  // Queries are scoped to a single partition.\n  optional PartitionId partition_id = 1;\n\n  // A (kind, ID/name) pair used to construct a key path.\n  //\n  // At most one of name or ID may be set.\n  // If either is set, the element is complete.\n  // If neither is set, the element is incomplete.\n  message PathElement {\n    // The kind of the entity.\n    // A kind matching regex \"__.*__\" is reserved/read-only.\n    // A kind must not contain more than 500 characters.\n    // Cannot be \"\".\n    required string kind = 1;\n    // The ID of the entity.\n    // Never equal to zero. Values less than zero are discouraged and will not\n    // be supported in the future.\n    optional int64 id = 2;\n    // The name of the entity.\n    // A name matching regex \"__.*__\" is reserved/read-only.\n    // A name must not be more than 500 characters.\n    // Cannot be \"\".\n    optional string name = 3;\n  }\n\n  // The entity path.\n  // An entity path consists of one or more elements composed of a kind and a\n  // string or numerical identifier, which identify entities. The first\n  // element identifies a <em>root entity</em>, the second element identifies\n  // a <em>child</em> of the root entity, the third element a child of the\n  // second entity, and so forth. The entities identified by all prefixes of\n  // the path are called the element's <em>ancestors</em>.\n  // An entity path is always fully complete: ALL of the entity's ancestors\n  // are required to be in the path along with the entity identifier itself.\n  // The only exception is that in some documented cases, the identifier in the\n  // last path element (for the entity) itself may be omitted. 
A path can never\n  // be empty.\n  repeated PathElement path_element = 2;\n}\n\n// A message that can hold any of the supported value types and associated\n// metadata.\n//\n// At most one of the <type>Value fields may be set.\n// If none are set the value is \"null\".\n//\nmessage Value {\n  // A boolean value.\n  optional bool boolean_value = 1;\n  // An integer value.\n  optional int64 integer_value = 2;\n  // A double value.\n  optional double double_value = 3;\n  // A timestamp value.\n  optional int64 timestamp_microseconds_value = 4;\n  // A key value.\n  optional Key key_value  = 5;\n  // A blob key value.\n  optional string blob_key_value = 16;\n  // A UTF-8 encoded string value.\n  optional string string_value = 17;\n  // A blob value.\n  optional bytes blob_value = 18;\n  // An entity value.\n  // May have no key.\n  // May have a key with an incomplete key path.\n  // May have a reserved/read-only key.\n  optional Entity entity_value = 6;\n  // A list value.\n  // Cannot contain another list value.\n  // Cannot also have a meaning and indexing set.\n  repeated Value list_value = 7;\n\n  // The <code>meaning</code> field is reserved and should not be used.\n  optional int32 meaning = 14;\n\n  // If the value should be indexed.\n  //\n  // The <code>indexed</code> property may be set for a\n  // <code>null</code> value.\n  // When <code>indexed</code> is <code>true</code>, <code>stringValue</code>\n  // is limited to 500 characters and the blob value is limited to 500 bytes.\n  // Exception: If meaning is set to 2, string_value is limited to 2038\n  // characters regardless of indexed.\n  // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16\n  // will be ignored on input (and will never be set on output).\n  // Input values by default have <code>indexed</code> set to\n  // <code>true</code>; however, you can explicitly set <code>indexed</code> to\n  // <code>true</code> if you want. 
(An output value never has\n  // <code>indexed</code> explicitly set to <code>true</code>.) If a value is\n  // itself an entity, it cannot have <code>indexed</code> set to\n  // <code>true</code>.\n  // Exception: An entity value with meaning 9, 20 or 21 may be indexed.\n  optional bool indexed = 15 [default = true];\n}\n\n// An entity property.\nmessage Property {\n  // The name of the property.\n  // A property name matching regex \"__.*__\" is reserved.\n  // A reserved property name is forbidden in certain documented contexts.\n  // The name must not contain more than 500 characters.\n  // Cannot be \"\".\n  required string name = 1;\n\n  // The value(s) of the property.\n  // Each value can have only one value property populated. For example,\n  // you cannot have a values list of <code>{ value: { integerValue: 22,\n  // stringValue: \"a\" } }</code>, but you can have <code>{ value: { listValue:\n  // [ { integerValue: 22 }, { stringValue: \"a\" } ] }</code>.\n  required Value value = 4;\n}\n\n// An entity.\n//\n// An entity is limited to 1 megabyte when stored. 
That <em>roughly</em>\n// corresponds to a limit of 1 megabyte for the serialized form of this\n// message.\nmessage Entity {\n  // The entity's key.\n  //\n  // An entity must have a key, unless otherwise documented (for example,\n  // an entity in <code>Value.entityValue</code> may have no key).\n  // An entity's kind is its key's path's last element's kind,\n  // or null if it has no key.\n  optional Key key = 1;\n  // The entity's properties.\n  // Each property's name must be unique for its entity.\n  repeated Property property = 2;\n}\n\n// The result of fetching an entity from the datastore.\nmessage EntityResult {\n  // Specifies what data the 'entity' field contains.\n  // A ResultType is either implied (for example, in LookupResponse.found it\n  // is always FULL) or specified by context (for example, in message\n  // QueryResultBatch, field 'entity_result_type' specifies a ResultType\n  // for all the values in field 'entity_result').\n  enum ResultType {\n    FULL = 1;  // The entire entity.\n    PROJECTION = 2;  // A projected subset of properties.\n                     // The entity may have no key.\n                     // A property value may have meaning 18.\n    KEY_ONLY = 3;  // Only the key.\n  }\n\n  // The resulting entity.\n  required Entity entity = 1;\n}\n\n// A query.\nmessage Query {\n  // The projection to return. If not set the entire entity is returned.\n  repeated PropertyExpression projection = 2;\n\n  // The kinds to query (if empty, returns entities from all kinds).\n  repeated KindExpression kind = 3;\n\n  // The filter to apply (optional).\n  optional Filter filter = 4;\n\n  // The order to apply to the query results (if empty, order is unspecified).\n  repeated PropertyOrder order = 5;\n\n  // The properties to group by (if empty, no grouping is applied to the\n  // result set).\n  repeated PropertyReference group_by = 6;\n\n  // A starting point for the query results. Optional. 
Query cursors are\n  // returned in query result batches.\n  optional bytes /* serialized QueryCursor */ start_cursor = 7;\n\n  // An ending point for the query results. Optional. Query cursors are\n  // returned in query result batches.\n  optional bytes /* serialized QueryCursor */ end_cursor = 8;\n\n  // The number of results to skip. Applies before limit, but after all other\n  // constraints (optional, defaults to 0).\n  optional int32 offset = 10 [default=0];\n\n  // The maximum number of results to return. Applies after all other\n  // constraints. Optional.\n  optional int32 limit = 11;\n}\n\n// A representation of a kind.\nmessage KindExpression {\n  // The name of the kind.\n  required string name = 1;\n}\n\n// A reference to a property relative to the kind expressions.\n// exactly.\nmessage PropertyReference {\n  // The name of the property.\n  required string name = 2;\n}\n\n// A representation of a property in a projection.\nmessage PropertyExpression {\n  enum AggregationFunction {\n    FIRST = 1;\n  }\n  // The property to project.\n  required PropertyReference property = 1;\n  // The aggregation function to apply to the property. Optional.\n  // Can only be used when grouping by at least one property. Must\n  // then be set on all properties in the projection that are not\n  // being grouped by.\n  optional AggregationFunction aggregation_function = 2;\n}\n\n// The desired order for a specific property.\nmessage PropertyOrder {\n  enum Direction {\n    ASCENDING = 1;\n    DESCENDING = 2;\n  }\n  // The property to order by.\n  required PropertyReference property = 1;\n  // The direction to order by.\n  optional Direction direction = 2 [default=ASCENDING];\n}\n\n// A holder for any type of filter. 
Exactly one field should be specified.\nmessage Filter {\n  // A composite filter.\n  optional CompositeFilter composite_filter = 1;\n  // A filter on a property.\n  optional PropertyFilter property_filter = 2;\n}\n\n// A filter that merges the multiple other filters using the given operation.\nmessage CompositeFilter {\n  enum Operator {\n    AND = 1;\n  }\n\n  // The operator for combining multiple filters.\n  required Operator operator = 1;\n  // The list of filters to combine.\n  // Must contain at least one filter.\n  repeated Filter filter = 2;\n}\n\n// A filter on a specific property.\nmessage PropertyFilter {\n  enum Operator {\n    LESS_THAN = 1;\n    LESS_THAN_OR_EQUAL = 2;\n    GREATER_THAN = 3;\n    GREATER_THAN_OR_EQUAL = 4;\n    EQUAL = 5;\n\n    HAS_ANCESTOR = 11;\n  }\n\n  // The property to filter by.\n  required PropertyReference property = 1;\n  // The operator to filter by.\n  required Operator operator = 2;\n  // The value to compare the property to.\n  required Value value = 3;\n}\n\n// A GQL query.\nmessage GqlQuery {\n  required string query_string = 1;\n  // When false, the query string must not contain a literal.\n  optional bool allow_literal = 2 [default = false];\n  // A named argument must set field GqlQueryArg.name.\n  // No two named arguments may have the same name.\n  // For each non-reserved named binding site in the query string,\n  // there must be a named argument with that name,\n  // but not necessarily the inverse.\n  repeated GqlQueryArg name_arg = 3;\n  // Numbered binding site @1 references the first numbered argument,\n  // effectively using 1-based indexing, rather than the usual 0.\n  // A numbered argument must NOT set field GqlQueryArg.name.\n  // For each binding site numbered i in query_string,\n  // there must be an ith numbered argument.\n  // The inverse must also be true.\n  repeated GqlQueryArg number_arg = 4;\n}\n\n// A binding argument for a GQL query.\n// Exactly one of fields value and cursor must be 
set.\nmessage GqlQueryArg {\n  // Must match regex \"[A-Za-z_$][A-Za-z_$0-9]*\".\n  // Must not match regex \"__.*__\".\n  // Must not be \"\".\n  optional string name = 1;\n  optional Value value = 2;\n  optional bytes cursor = 3;\n}\n\n// A batch of results produced by a query.\nmessage QueryResultBatch {\n  // The possible values for the 'more_results' field.\n  enum MoreResultsType {\n    NOT_FINISHED = 1;  // There are additional batches to fetch from this query.\n    MORE_RESULTS_AFTER_LIMIT = 2;  // The query is finished, but there are more\n                                   // results after the limit.\n    NO_MORE_RESULTS = 3;  // The query has been exhausted.\n  }\n\n  // The result type for every entity in entityResults.\n  required EntityResult.ResultType entity_result_type = 1;\n  // The results for this batch.\n  repeated EntityResult entity_result = 2;\n\n  // A cursor that points to the position after the last result in the batch.\n  // May be absent.\n  optional bytes /* serialized QueryCursor */ end_cursor = 4;\n\n  // The state of the query after the current batch.\n  required MoreResultsType more_results = 5;\n\n  // The number of results skipped because of <code>Query.offset</code>.\n  optional int32 skipped_results = 6;\n}\n\n// A set of changes to apply.\n//\n// No entity in this message may have a reserved property name,\n// not even a property in an entity in a value.\n// No value in this message may have meaning 18,\n// not even a value in an entity in another value.\n//\n// If entities with duplicate keys are present, an arbitrary choice will\n// be made as to which is written.\nmessage Mutation {\n  // Entities to upsert.\n  // Each upserted entity's key must have a complete path and\n  // must not be reserved/read-only.\n  repeated Entity upsert = 1;\n  // Entities to update.\n  // Each updated entity's key must have a complete path and\n  // must not be reserved/read-only.\n  repeated Entity update = 2;\n  // Entities to insert.\n  // 
Each inserted entity's key must have a complete path and\n  // must not be reserved/read-only.\n  repeated Entity insert = 3;\n  // Insert entities with a newly allocated ID.\n  // Each inserted entity's key must omit the final identifier in its path and\n  // must not be reserved/read-only.\n  repeated Entity insert_auto_id = 4;\n  // Keys of entities to delete.\n  // Each key must have a complete key path and must not be reserved/read-only.\n  repeated Key delete = 5;\n  // Ignore a user specified read-only period. Optional.\n  optional bool force = 6;\n}\n\n// The result of applying a mutation.\nmessage MutationResult {\n  // Number of index writes.\n  required int32 index_updates = 1;\n  // Keys for <code>insertAutoId</code> entities. One per entity from the\n  // request, in the same order.\n  repeated Key insert_auto_id_key = 2;\n}\n\n// Options shared by read requests.\nmessage ReadOptions {\n  enum ReadConsistency {\n    DEFAULT = 0;\n    STRONG = 1;\n    EVENTUAL = 2;\n  }\n\n  // The read consistency to use.\n  // Cannot be set when transaction is set.\n  // Lookup and ancestor queries default to STRONG, global queries default to\n  // EVENTUAL and cannot be set to STRONG.\n  optional ReadConsistency read_consistency = 1 [default=DEFAULT];\n\n  // The transaction to use. Optional.\n  optional bytes /* serialized Transaction */ transaction = 2;\n}\n\n// The request for Lookup.\nmessage LookupRequest {\n\n  // Options for this lookup request. 
Optional.\n  optional ReadOptions read_options = 1;\n  // Keys of entities to look up from the datastore.\n  repeated Key key = 3;\n}\n\n// The response for Lookup.\nmessage LookupResponse {\n\n  // The order of results in these fields is undefined and has no relation to\n  // the order of the keys in the input.\n\n  // Entities found as ResultType.FULL entities.\n  repeated EntityResult found = 1;\n\n  // Entities not found as ResultType.KEY_ONLY entities.\n  repeated EntityResult missing = 2;\n\n  // A list of keys that were not looked up due to resource constraints.\n  repeated Key deferred = 3;\n}\n\n\n// The request for RunQuery.\nmessage RunQueryRequest {\n\n  // The options for this query.\n  optional ReadOptions read_options = 1;\n\n  // Entities are partitioned into subsets, identified by a dataset (usually\n  // implicitly specified by the project) and namespace ID. Queries are scoped\n  // to a single partition.\n  // This partition ID is normalized with the standard default context\n  // partition ID, but all other partition IDs in RunQueryRequest are\n  // normalized with this partition ID as the context partition ID.\n  optional PartitionId partition_id = 2;\n\n  // The query to run.\n  // Either this field or field gql_query must be set, but not both.\n  optional Query query = 3;\n  // The GQL query to run.\n  // Either this field or field query must be set, but not both.\n  optional GqlQuery gql_query = 7;\n}\n\n// The response for RunQuery.\nmessage RunQueryResponse {\n\n  // A batch of query results (always present).\n  optional QueryResultBatch batch = 1;\n\n}\n\n// The request for BeginTransaction.\nmessage BeginTransactionRequest {\n\n  enum IsolationLevel {\n    SNAPSHOT = 0;  // Read from a consistent snapshot. Concurrent transactions\n                   // conflict if their mutations conflict. 
For example:\n                   // Read(A),Write(B) may not conflict with Read(B),Write(A),\n                   // but Read(B),Write(B) does conflict with Read(B),Write(B).\n    SERIALIZABLE = 1; // Read from a consistent snapshot. Concurrent\n                      // transactions conflict if they cannot be serialized.\n                      // For example Read(A),Write(B) does conflict with\n                      // Read(B),Write(A) but Read(A) may not conflict with\n                      // Write(A).\n  }\n\n  // The transaction isolation level.\n  optional IsolationLevel isolation_level = 1 [default=SNAPSHOT];\n}\n\n// The response for BeginTransaction.\nmessage BeginTransactionResponse {\n\n  // The transaction identifier (always present).\n  optional bytes /* serialized Transaction */ transaction = 1;\n}\n\n// The request for Rollback.\nmessage RollbackRequest {\n\n  // The transaction identifier, returned by a call to\n  // <code>beginTransaction</code>.\n  required bytes /* serialized Transaction */ transaction = 1;\n}\n\n// The response for Rollback.\nmessage RollbackResponse {\n// Empty\n}\n\n// The request for Commit.\nmessage CommitRequest {\n\n  enum Mode {\n    TRANSACTIONAL = 1;\n    NON_TRANSACTIONAL = 2;\n  }\n\n  // The transaction identifier, returned by a call to\n  // <code>beginTransaction</code>. Must be set when mode is TRANSACTIONAL.\n  optional bytes /* serialized Transaction */ transaction = 1;\n  // The mutation to perform. Optional.\n  optional Mutation mutation = 2;\n  // The type of commit to perform. 
Either TRANSACTIONAL or NON_TRANSACTIONAL.\n  optional Mode mode = 5 [default=TRANSACTIONAL];\n}\n\n// The response for Commit.\nmessage CommitResponse {\n\n  // The result of performing the mutation (if any).\n  optional MutationResult mutation_result = 1;\n}\n\n// The request for AllocateIds.\nmessage AllocateIdsRequest {\n\n  // A list of keys with incomplete key paths to allocate IDs for.\n  // No key may be reserved/read-only.\n  repeated Key key = 1;\n}\n\n// The response for AllocateIds.\nmessage AllocateIdsResponse {\n\n  // The keys specified in the request (in the same order), each with\n  // its key path completed with a newly allocated ID.\n  repeated Key key = 1;\n}\n\n// Each rpc normalizes the partition IDs of the keys in its input entities,\n// and always returns entities with keys with normalized partition IDs.\n// (Note that applies to all entities, including entities in values.)\nservice DatastoreService {\n  // Look up some entities by key.\n  rpc Lookup(LookupRequest) returns (LookupResponse) {\n  };\n  // Query for entities.\n  rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) {\n  };\n  // Begin a new transaction.\n  rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) {\n  };\n  // Commit a transaction, optionally creating, deleting or modifying some\n  // entities.\n  rpc Commit(CommitRequest) returns (CommitResponse) {\n  };\n  // Roll back a transaction.\n  rpc Rollback(RollbackRequest) returns (RollbackResponse) {\n  };\n  // Allocate IDs for incomplete keys (useful for referencing an entity before\n  // it is inserted).\n  rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) {\n  };\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/internal/testutil/context.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package testutil contains helper functions for writing tests.\npackage testutil\n\nimport (\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/google\"\n\t\"google.golang.org/cloud\"\n)\n\nconst (\n\tenvProjID     = \"GCLOUD_TESTS_GOLANG_PROJECT_ID\"\n\tenvPrivateKey = \"GCLOUD_TESTS_GOLANG_KEY\"\n)\n\nfunc Context(scopes ...string) context.Context {\n\tkey, projID := os.Getenv(envPrivateKey), os.Getenv(envProjID)\n\tif key == \"\" || projID == \"\" {\n\t\tlog.Fatal(\"GCLOUD_TESTS_GOLANG_KEY and GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.\")\n\t}\n\tjsonKey, err := ioutil.ReadFile(key)\n\tif err != nil {\n\t\tlog.Fatalf(\"Cannot read the JSON key file, err: %v\", err)\n\t}\n\tconf, err := google.JWTConfigFromJSON(jsonKey, scopes...)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn cloud.NewContext(projID, conf.Client(oauth2.NoContext))\n}\n\nfunc NoAuthContext() context.Context {\n\tprojID := os.Getenv(envProjID)\n\tif projID == \"\" {\n\t\tlog.Fatal(\"GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.\")\n\t}\n\treturn cloud.NewContext(projID, &http.Client{Transport: http.DefaultTransport})\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/option.go",
    "content": "/*\nCopyright 2015 Google Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cloud\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/google\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials\"\n)\n\ntype dialOpt struct {\n\tendpoint string\n\tscopes   []string\n\n\ttokenSource oauth2.TokenSource\n\n\thttpClient *http.Client\n\tgrpcClient *grpc.ClientConn\n}\n\n// ClientOption is used when construct clients for each cloud service.\ntype ClientOption interface {\n\tresolve(*dialOpt)\n}\n\n// WithTokenSource returns a ClientOption that specifies an OAuth2 token\n// source to be used as the basis for authentication.\nfunc WithTokenSource(s oauth2.TokenSource) ClientOption {\n\treturn withTokenSource{s}\n}\n\ntype withTokenSource struct{ ts oauth2.TokenSource }\n\nfunc (w withTokenSource) resolve(o *dialOpt) {\n\to.tokenSource = w.ts\n}\n\n// WithEndpoint returns a ClientOption that overrides the default endpoint\n// to be used for a service.\nfunc WithEndpoint(url string) ClientOption {\n\treturn withEndpoint(url)\n}\n\ntype withEndpoint string\n\nfunc (w withEndpoint) resolve(o *dialOpt) {\n\to.endpoint = string(w)\n}\n\n// WithScopes returns a ClientOption that overrides the default OAuth2 scopes\n// to be used for a service.\nfunc WithScopes(scope ...string) ClientOption {\n\treturn withScopes(scope)\n}\n\ntype withScopes 
[]string\n\nfunc (w withScopes) resolve(o *dialOpt) {\n\to.scopes = []string(w)\n}\n\n// WithBaseHTTP returns a ClientOption that specifies the HTTP client to\n// use as the basis of communications. This option may only be used with\n// services that support HTTP as their communication transport.\nfunc WithBaseHTTP(client *http.Client) ClientOption {\n\treturn withBaseHTTP{client}\n}\n\ntype withBaseHTTP struct{ client *http.Client }\n\nfunc (w withBaseHTTP) resolve(o *dialOpt) {\n\to.httpClient = w.client\n}\n\n// WithBaseGRPC returns a ClientOption that specifies the GRPC client\n// connection to use as the basis of communications. This option many only be\n// used with services that support HRPC as their communication transport.\nfunc WithBaseGRPC(client *grpc.ClientConn) ClientOption {\n\treturn withBaseGRPC{client}\n}\n\ntype withBaseGRPC struct{ client *grpc.ClientConn }\n\nfunc (w withBaseGRPC) resolve(o *dialOpt) {\n\to.grpcClient = w.client\n}\n\n// DialHTTP returns an HTTP client for use communicating with a Google cloud\n// service, configured with the given ClientOptions. 
Most developers should\n// call the relevant NewClient method for the target service rather than\n// invoking DialHTTP directly.\nfunc DialHTTP(ctx context.Context, opt ...ClientOption) (*http.Client, error) {\n\tvar o dialOpt\n\tfor _, opt := range opt {\n\t\topt.resolve(&o)\n\t}\n\tif o.grpcClient != nil {\n\t\treturn nil, errors.New(\"unsupported GRPC base transport specified\")\n\t}\n\t// TODO(djd): Wrap all http.Client's with appropriate internal version to add\n\t// UserAgent header and prepend correct endpoint.\n\tif o.httpClient != nil {\n\t\treturn o.httpClient, nil\n\t}\n\tif o.tokenSource == nil {\n\t\tvar err error\n\t\to.tokenSource, err = google.DefaultTokenSource(ctx, o.scopes...)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"google.DefaultTokenSource: %v\", err)\n\t\t}\n\t}\n\treturn oauth2.NewClient(ctx, o.tokenSource), nil\n}\n\n// DialGRPC returns a GRPC connection for use communicating with a Google cloud\n// service, configured with the given ClientOptions. Most developers should\n// call the relevant NewClient method for the target service rather than\n// invoking DialGRPC directly.\nfunc DialGRPC(ctx context.Context, opt ...ClientOption) (*grpc.ClientConn, error) {\n\tvar o dialOpt\n\tfor _, opt := range opt {\n\t\topt.resolve(&o)\n\t}\n\tif o.httpClient != nil {\n\t\treturn nil, errors.New(\"unsupported HTTP base transport specified\")\n\t}\n\tif o.grpcClient != nil {\n\t\treturn o.grpcClient, nil\n\t}\n\tif o.tokenSource == nil {\n\t\tvar err error\n\t\to.tokenSource, err = google.DefaultTokenSource(ctx, o.scopes...)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"google.DefaultTokenSource: %v\", err)\n\t\t}\n\t}\n\tgrpcOpts := []grpc.DialOption{\n\t\tgrpc.WithPerRPCCredentials(credentials.TokenSource{o.tokenSource}),\n\t\tgrpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, \"\")),\n\t}\n\treturn grpc.Dial(o.endpoint, grpcOpts...)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/pubsub/pubsub.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package pubsub contains a Google Cloud Pub/Sub client.\n//\n// This package is experimental and may make backwards-incompatible changes.\n//\n// More information about Google Cloud Pub/Sub is available at\n// https://cloud.google.com/pubsub/docs\npackage pubsub\n\nimport (\n\t\"encoding/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"google.golang.org/cloud/internal\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/api/googleapi\"\n\traw \"google.golang.org/api/pubsub/v1beta2\"\n)\n\nconst (\n\t// ScopePubSub grants permissions to view and manage Pub/Sub\n\t// topics and subscriptions.\n\tScopePubSub = \"https://www.googleapis.com/auth/pubsub\"\n\n\t// ScopeCloudPlatform grants permissions to view and manage your data\n\t// across Google Cloud Platform services.\n\tScopeCloudPlatform = \"https://www.googleapis.com/auth/cloud-platform\"\n)\n\n// batchLimit is maximun size of a single batch.\nconst batchLimit = 1000\n\n// Message represents a Pub/Sub message.\ntype Message struct {\n\t// ID identifies this message.\n\tID string\n\n\t// AckID is the identifier to acknowledge this message.\n\tAckID string\n\n\t// Data is the actual data in the message.\n\tData []byte\n\n\t// Attributes represents the key-value pairs the current message\n\t// is labelled with.\n\tAttributes map[string]string\n}\n\n// 
TODO(jbd): Add subscription and topic listing.\n\n// CreateSub creates a Pub/Sub subscription on the backend.\n// A subscription should subscribe to an existing topic.\n//\n// The messages that haven't acknowledged will be pushed back to the\n// subscription again when the default acknowledgement deadline is\n// reached. You can override the default deadline by providing a\n// non-zero deadline. Deadline must not be specified to\n// precision greater than one second.\n//\n// As new messages are being queued on the subscription, you\n// may recieve push notifications regarding to the new arrivals.\n// To receive notifications of new messages in the queue,\n// specify an endpoint callback URL.\n// If endpoint is an empty string the backend will not notify the\n// client of new messages.\n//\n// If the subscription already exists an error will be returned.\nfunc CreateSub(ctx context.Context, name string, topic string, deadline time.Duration, endpoint string) error {\n\tsub := &raw.Subscription{\n\t\tTopic: fullTopicName(internal.ProjID(ctx), topic),\n\t}\n\tif int64(deadline) > 0 {\n\t\tif !isSec(deadline) {\n\t\t\treturn errors.New(\"pubsub: deadline must not be specified to precision greater than one second\")\n\t\t}\n\t\tsub.AckDeadlineSeconds = int64(deadline / time.Second)\n\t}\n\tif endpoint != \"\" {\n\t\tsub.PushConfig = &raw.PushConfig{PushEndpoint: endpoint}\n\t}\n\t_, err := rawService(ctx).Projects.Subscriptions.Create(fullSubName(internal.ProjID(ctx), name), sub).Do()\n\treturn err\n}\n\n// DeleteSub deletes the subscription.\nfunc DeleteSub(ctx context.Context, name string) error {\n\t_, err := rawService(ctx).Projects.Subscriptions.Delete(fullSubName(internal.ProjID(ctx), name)).Do()\n\treturn err\n}\n\n// ModifyAckDeadline modifies the acknowledgement deadline\n// for the messages retrieved from the specified subscription.\n// Deadline must not be specified to precision greater than one second.\nfunc ModifyAckDeadline(ctx context.Context, sub string, 
id string, deadline time.Duration) error {\n\tif !isSec(deadline) {\n\t\treturn errors.New(\"pubsub: deadline must not be specified to precision greater than one second\")\n\t}\n\t_, err := rawService(ctx).Projects.Subscriptions.ModifyAckDeadline(fullSubName(internal.ProjID(ctx), sub), &raw.ModifyAckDeadlineRequest{\n\t\tAckDeadlineSeconds: int64(deadline / time.Second),\n\t\tAckId:              id,\n\t}).Do()\n\treturn err\n}\n\n// ModifyPushEndpoint modifies the URL endpoint to modify the resource\n// to handle push notifications coming from the Pub/Sub backend\n// for the specified subscription.\nfunc ModifyPushEndpoint(ctx context.Context, sub, endpoint string) error {\n\t_, err := rawService(ctx).Projects.Subscriptions.ModifyPushConfig(fullSubName(internal.ProjID(ctx), sub), &raw.ModifyPushConfigRequest{\n\t\tPushConfig: &raw.PushConfig{\n\t\t\tPushEndpoint: endpoint,\n\t\t},\n\t}).Do()\n\treturn err\n}\n\n// SubExists returns true if subscription exists.\nfunc SubExists(ctx context.Context, name string) (bool, error) {\n\t_, err := rawService(ctx).Projects.Subscriptions.Get(fullSubName(internal.ProjID(ctx), name)).Do()\n\tif e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {\n\t\treturn false, nil\n\t}\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\n// Ack acknowledges one or more Pub/Sub messages on the\n// specified subscription.\nfunc Ack(ctx context.Context, sub string, id ...string) error {\n\tfor idx, ackID := range id {\n\t\tif ackID == \"\" {\n\t\t\treturn fmt.Errorf(\"pubsub: empty ackID detected at index %d\", idx)\n\t\t}\n\t}\n\t_, err := rawService(ctx).Projects.Subscriptions.Acknowledge(fullSubName(internal.ProjID(ctx), sub), &raw.AcknowledgeRequest{\n\t\tAckIds: id,\n\t}).Do()\n\treturn err\n}\n\nfunc toMessage(resp *raw.ReceivedMessage) (*Message, error) {\n\tif resp.Message == nil {\n\t\treturn &Message{AckID: resp.AckId}, nil\n\t}\n\tdata, err := 
base64.StdEncoding.DecodeString(resp.Message.Data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Message{\n\t\tAckID:      resp.AckId,\n\t\tData:       data,\n\t\tAttributes: resp.Message.Attributes,\n\t\tID:         resp.Message.MessageId,\n\t}, nil\n}\n\n// Pull pulls messages from the subscription. It returns up to n\n// number of messages, and n could not be larger than 100.\nfunc Pull(ctx context.Context, sub string, n int) ([]*Message, error) {\n\treturn pull(ctx, sub, n, true)\n}\n\n// PullWait pulls messages from the subscription. If there are not\n// enough messages left in the subscription queue, it will block until\n// at least n number of messages arrive or timeout occurs, and n could\n// not be larger than 100.\nfunc PullWait(ctx context.Context, sub string, n int) ([]*Message, error) {\n\treturn pull(ctx, sub, n, false)\n}\n\nfunc pull(ctx context.Context, sub string, n int, retImmediately bool) ([]*Message, error) {\n\tif n < 1 || n > batchLimit {\n\t\treturn nil, fmt.Errorf(\"pubsub: cannot pull less than one, more than %d messages, but %d was given\", batchLimit, n)\n\t}\n\tresp, err := rawService(ctx).Projects.Subscriptions.Pull(fullSubName(internal.ProjID(ctx), sub), &raw.PullRequest{\n\t\tReturnImmediately: retImmediately,\n\t\tMaxMessages:       int64(n),\n\t}).Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmsgs := make([]*Message, len(resp.ReceivedMessages))\n\tfor i := 0; i < len(resp.ReceivedMessages); i++ {\n\t\tmsg, err := toMessage(resp.ReceivedMessages[i])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"pubsub: cannot decode the retrieved message at index: %d, PullResponse: %+v\", i, resp.ReceivedMessages[i])\n\t\t}\n\t\tmsgs[i] = msg\n\t}\n\treturn msgs, nil\n}\n\n// CreateTopic creates a new topic with the specified name on the backend.\n// It will return an error if topic already exists.\nfunc CreateTopic(ctx context.Context, name string) error {\n\t_, err := 
rawService(ctx).Projects.Topics.Create(fullTopicName(internal.ProjID(ctx), name), &raw.Topic{}).Do()\n\treturn err\n}\n\n// DeleteTopic deletes the specified topic.\nfunc DeleteTopic(ctx context.Context, name string) error {\n\t_, err := rawService(ctx).Projects.Topics.Delete(fullTopicName(internal.ProjID(ctx), name)).Do()\n\treturn err\n}\n\n// TopicExists returns true if a topic exists with the specified name.\nfunc TopicExists(ctx context.Context, name string) (bool, error) {\n\t_, err := rawService(ctx).Projects.Topics.Get(fullTopicName(internal.ProjID(ctx), name)).Do()\n\tif e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {\n\t\treturn false, nil\n\t}\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\n// Publish publish messages to the topic's subscribers. It returns\n// message IDs upon success.\nfunc Publish(ctx context.Context, topic string, msgs ...*Message) ([]string, error) {\n\tvar rawMsgs []*raw.PubsubMessage\n\tif len(msgs) == 0 {\n\t\treturn nil, errors.New(\"pubsub: no messages to publish\")\n\t}\n\tif len(msgs) > batchLimit {\n\t\treturn nil, fmt.Errorf(\"pubsub: %d messages given, but maximum batch size is %d\", len(msgs), batchLimit)\n\t}\n\trawMsgs = make([]*raw.PubsubMessage, len(msgs))\n\tfor i, msg := range msgs {\n\t\trawMsgs[i] = &raw.PubsubMessage{\n\t\t\tData:       base64.StdEncoding.EncodeToString(msg.Data),\n\t\t\tAttributes: msg.Attributes,\n\t\t}\n\t}\n\tresp, err := rawService(ctx).Projects.Topics.Publish(fullTopicName(internal.ProjID(ctx), topic), &raw.PublishRequest{\n\t\tMessages: rawMsgs,\n\t}).Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.MessageIds, nil\n}\n\n// fullSubName returns the fully qualified name for a subscription.\n// E.g. 
/subscriptions/project-id/subscription-name.\nfunc fullSubName(proj, name string) string {\n\treturn fmt.Sprintf(\"projects/%s/subscriptions/%s\", proj, name)\n}\n\n// fullTopicName returns the fully qualified name for a topic.\n// E.g. /topics/project-id/topic-name.\nfunc fullTopicName(proj, name string) string {\n\treturn fmt.Sprintf(\"projects/%s/topics/%s\", proj, name)\n}\n\nfunc isSec(dur time.Duration) bool {\n\treturn dur%time.Second == 0\n}\n\nfunc rawService(ctx context.Context) *raw.Service {\n\treturn internal.Service(ctx, \"pubsub\", func(hc *http.Client) interface{} {\n\t\tsvc, _ := raw.New(hc)\n\t\treturn svc\n\t}).(*raw.Service)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/storage/acl.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage storage\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org/x/net/context\"\n\traw \"google.golang.org/api/storage/v1\"\n)\n\n// ACLRole is the the access permission for the entity.\ntype ACLRole string\n\nconst (\n\tRoleOwner  ACLRole = \"OWNER\"\n\tRoleReader ACLRole = \"READER\"\n)\n\n// ACLEntity is an entity holding an ACL permission.\n//\n// It could be in the form of:\n// \"user-<userId>\", \"user-<email>\",\"group-<groupId>\", \"group-<email>\",\n// \"domain-<domain>\" and \"project-team-<projectId>\".\n//\n// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.\ntype ACLEntity string\n\nconst (\n\tAllUsers              ACLEntity = \"allUsers\"\n\tAllAuthenticatedUsers ACLEntity = \"allAuthenticatedUsers\"\n)\n\n// ACLRule represents an access control list rule entry for a Google Cloud Storage object or bucket.\n// A bucket is a Google Cloud Storage container whose name is globally unique and contains zero or\n// more objects.  
An object is a blob of data that is stored in a bucket.\ntype ACLRule struct {\n\t// Entity identifies the entity holding the current rule's permissions.\n\tEntity ACLEntity\n\n\t// Role is the the access permission for the entity.\n\tRole ACLRole\n}\n\n// DefaultACL returns the default object ACL entries for the named bucket.\nfunc DefaultACL(ctx context.Context, bucket string) ([]ACLRule, error) {\n\tacls, err := rawService(ctx).DefaultObjectAccessControls.List(bucket).Do()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"storage: error listing default object ACL for bucket %q: %v\", bucket, err)\n\t}\n\tr := make([]ACLRule, 0, len(acls.Items))\n\tfor _, v := range acls.Items {\n\t\tif m, ok := v.(map[string]interface{}); ok {\n\t\t\tentity, ok1 := m[\"entity\"].(string)\n\t\t\trole, ok2 := m[\"role\"].(string)\n\t\t\tif ok1 && ok2 {\n\t\t\t\tr = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)})\n\t\t\t}\n\t\t}\n\t}\n\treturn r, nil\n}\n\n// PutDefaultACLRule saves the named default object ACL entity with the provided role for the named bucket.\nfunc PutDefaultACLRule(ctx context.Context, bucket string, entity ACLEntity, role ACLRole) error {\n\tacl := &raw.ObjectAccessControl{\n\t\tBucket: bucket,\n\t\tEntity: string(entity),\n\t\tRole:   string(role),\n\t}\n\t_, err := rawService(ctx).DefaultObjectAccessControls.Update(bucket, string(entity), acl).Do()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"storage: error updating default ACL rule for bucket %q, entity %q: %v\", bucket, entity, err)\n\t}\n\treturn nil\n}\n\n// DeleteDefaultACLRule deletes the named default ACL entity for the named bucket.\nfunc DeleteDefaultACLRule(ctx context.Context, bucket string, entity ACLEntity) error {\n\terr := rawService(ctx).DefaultObjectAccessControls.Delete(bucket, string(entity)).Do()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"storage: error deleting default ACL rule for bucket %q, entity %q: %v\", bucket, entity, err)\n\t}\n\treturn nil\n}\n\n// BucketACL 
returns the ACL entries for the named bucket.\nfunc BucketACL(ctx context.Context, bucket string) ([]ACLRule, error) {\n\tacls, err := rawService(ctx).BucketAccessControls.List(bucket).Do()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"storage: error listing bucket ACL for bucket %q: %v\", bucket, err)\n\t}\n\tr := make([]ACLRule, len(acls.Items))\n\tfor i, v := range acls.Items {\n\t\tr[i].Entity = ACLEntity(v.Entity)\n\t\tr[i].Role = ACLRole(v.Role)\n\t}\n\treturn r, nil\n}\n\n// PutBucketACLRule saves the named ACL entity with the provided role for the named bucket.\nfunc PutBucketACLRule(ctx context.Context, bucket string, entity ACLEntity, role ACLRole) error {\n\tacl := &raw.BucketAccessControl{\n\t\tBucket: bucket,\n\t\tEntity: string(entity),\n\t\tRole:   string(role),\n\t}\n\t_, err := rawService(ctx).BucketAccessControls.Update(bucket, string(entity), acl).Do()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"storage: error updating bucket ACL rule for bucket %q, entity %q: %v\", bucket, entity, err)\n\t}\n\treturn nil\n}\n\n// DeleteBucketACLRule deletes the named ACL entity for the named bucket.\nfunc DeleteBucketACLRule(ctx context.Context, bucket string, entity ACLEntity) error {\n\terr := rawService(ctx).BucketAccessControls.Delete(bucket, string(entity)).Do()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"storage: error deleting bucket ACL rule for bucket %q, entity %q: %v\", bucket, entity, err)\n\t}\n\treturn nil\n}\n\n// ACL returns the ACL entries for the named object.\nfunc ACL(ctx context.Context, bucket, object string) ([]ACLRule, error) {\n\tacls, err := rawService(ctx).ObjectAccessControls.List(bucket, object).Do()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"storage: error listing object ACL for bucket %q, file %q: %v\", bucket, object, err)\n\t}\n\tr := make([]ACLRule, 0, len(acls.Items))\n\tfor _, v := range acls.Items {\n\t\tif m, ok := v.(map[string]interface{}); ok {\n\t\t\tentity, ok1 := m[\"entity\"].(string)\n\t\t\trole, ok2 := 
m[\"role\"].(string)\n\t\t\tif ok1 && ok2 {\n\t\t\t\tr = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)})\n\t\t\t}\n\t\t}\n\t}\n\treturn r, nil\n}\n\n// PutACLRule saves the named ACL entity with the provided role for the named object.\nfunc PutACLRule(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole) error {\n\tacl := &raw.ObjectAccessControl{\n\t\tBucket: bucket,\n\t\tEntity: string(entity),\n\t\tRole:   string(role),\n\t}\n\t_, err := rawService(ctx).ObjectAccessControls.Update(bucket, object, string(entity), acl).Do()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"storage: error updating object ACL rule for bucket %q, file %q, entity %q: %v\", bucket, object, entity, err)\n\t}\n\treturn nil\n}\n\n// DeleteACLRule deletes the named ACL entity for the named object.\nfunc DeleteACLRule(ctx context.Context, bucket, object string, entity ACLEntity) error {\n\terr := rawService(ctx).ObjectAccessControls.Delete(bucket, object, string(entity)).Do()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"storage: error deleting object ACL rule for bucket %q, file %q, entity %q: %v\", bucket, object, entity, err)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/storage/storage.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package storage contains a Google Cloud Storage client.\n//\n// This package is experimental and may make backwards-incompatible changes.\npackage storage\n\nimport (\n\t\"crypto\"\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"crypto/sha256\"\n\t\"crypto/x509\"\n\t\"encoding/base64\"\n\t\"encoding/pem\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"google.golang.org/cloud/internal\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/api/googleapi\"\n\traw \"google.golang.org/api/storage/v1\"\n)\n\nvar (\n\tErrBucketNotExist = errors.New(\"storage: bucket doesn't exist\")\n\tErrObjectNotExist = errors.New(\"storage: object doesn't exist\")\n)\n\nconst (\n\t// ScopeFullControl grants permissions to manage your\n\t// data and permissions in Google Cloud Storage.\n\tScopeFullControl = raw.DevstorageFullControlScope\n\n\t// ScopeReadOnly grants permissions to\n\t// view your data in Google Cloud Storage.\n\tScopeReadOnly = raw.DevstorageReadOnlyScope\n\n\t// ScopeReadWrite grants permissions to manage your\n\t// data in Google Cloud Storage.\n\tScopeReadWrite = raw.DevstorageReadWriteScope\n)\n\n// TODO(jbd): Add storage.buckets.list.\n// TODO(jbd): Add storage.buckets.insert.\n// TODO(jbd): Add storage.buckets.update.\n// TODO(jbd): Add storage.buckets.delete.\n\n// TODO(jbd): 
Add storage.objects.watch.\n\n// BucketInfo returns the metadata for the specified bucket.\nfunc BucketInfo(ctx context.Context, name string) (*Bucket, error) {\n\tresp, err := rawService(ctx).Buckets.Get(name).Projection(\"full\").Do()\n\tif e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {\n\t\treturn nil, ErrBucketNotExist\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newBucket(resp), nil\n}\n\n// ListObjects lists objects from the bucket. You can specify a query\n// to filter the results. If q is nil, no filtering is applied.\nfunc ListObjects(ctx context.Context, bucket string, q *Query) (*Objects, error) {\n\tc := rawService(ctx).Objects.List(bucket)\n\tc.Projection(\"full\")\n\tif q != nil {\n\t\tc.Delimiter(q.Delimiter)\n\t\tc.Prefix(q.Prefix)\n\t\tc.Versions(q.Versions)\n\t\tc.PageToken(q.Cursor)\n\t\tif q.MaxResults > 0 {\n\t\t\tc.MaxResults(int64(q.MaxResults))\n\t\t}\n\t}\n\tresp, err := c.Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tobjects := &Objects{\n\t\tResults:  make([]*Object, len(resp.Items)),\n\t\tPrefixes: make([]string, len(resp.Prefixes)),\n\t}\n\tfor i, item := range resp.Items {\n\t\tobjects.Results[i] = newObject(item)\n\t}\n\tfor i, prefix := range resp.Prefixes {\n\t\tobjects.Prefixes[i] = prefix\n\t}\n\tif resp.NextPageToken != \"\" {\n\t\tnext := Query{}\n\t\tif q != nil {\n\t\t\t// keep the other filtering\n\t\t\t// criteria if there is a query\n\t\t\tnext = *q\n\t\t}\n\t\tnext.Cursor = resp.NextPageToken\n\t\tobjects.Next = &next\n\t}\n\treturn objects, nil\n}\n\n// SignedURLOptions allows you to restrict the access to the signed URL.\ntype SignedURLOptions struct {\n\t// GoogleAccessID represents the authorizer of the signed URL generation.\n\t// It is typically the Google service account client email address from\n\t// the Google Developers Console in the form of \"xxx@developer.gserviceaccount.com\".\n\t// Required.\n\tGoogleAccessID string\n\n\t// PrivateKey is the Google service 
account private key. It is obtainable\n\t// from the Google Developers Console.\n\t// At https://console.developers.google.com/project/<your-project-id>/apiui/credential,\n\t// create a service account client ID or reuse one of your existing service account\n\t// credentials. Click on the \"Generate new P12 key\" to generate and download\n\t// a new private key. Once you download the P12 file, use the following command\n\t// to convert it into a PEM file.\n\t//\n\t//    $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes\n\t//\n\t// Provide the contents of the PEM file as a byte slice.\n\t// Required.\n\tPrivateKey []byte\n\n\t// Method is the HTTP method to be used with the signed URL.\n\t// Signed URLs can be used with GET, HEAD, PUT, and DELETE requests.\n\t// Required.\n\tMethod string\n\n\t// Expires is the expiration time on the signed URL. It must be\n\t// a datetime in the future.\n\t// Required.\n\tExpires time.Time\n\n\t// ContentType is the content type header the client must provide\n\t// to use the generated signed URL.\n\t// Optional.\n\tContentType string\n\n\t// Headers is a list of extention headers the client must provide\n\t// in order to use the generated signed URL.\n\t// Optional.\n\tHeaders []string\n\n\t// MD5 is the base64 encoded MD5 checksum of the file.\n\t// If provided, the client should provide the exact value on the request\n\t// header in order to use the signed URL.\n\t// Optional.\n\tMD5 []byte\n}\n\n// SignedURL returns a URL for the specified object. Signed URLs allow\n// the users access to a restricted resource for a limited time without having a\n// Google account or signing in. 
For more information about the signed\n// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs.\nfunc SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {\n\tif opts == nil {\n\t\treturn \"\", errors.New(\"storage: missing required SignedURLOptions\")\n\t}\n\tif opts.GoogleAccessID == \"\" || opts.PrivateKey == nil {\n\t\treturn \"\", errors.New(\"storage: missing required credentials to generate a signed URL\")\n\t}\n\tif opts.Method == \"\" {\n\t\treturn \"\", errors.New(\"storage: missing required method option\")\n\t}\n\tif opts.Expires.IsZero() {\n\t\treturn \"\", errors.New(\"storage: missing required expires option\")\n\t}\n\tkey, err := parseKey(opts.PrivateKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\th := sha256.New()\n\tfmt.Fprintf(h, \"%s\\n\", opts.Method)\n\tfmt.Fprintf(h, \"%s\\n\", opts.MD5)\n\tfmt.Fprintf(h, \"%s\\n\", opts.ContentType)\n\tfmt.Fprintf(h, \"%d\\n\", opts.Expires.Unix())\n\tfmt.Fprintf(h, \"%s\", strings.Join(opts.Headers, \"\\n\"))\n\tfmt.Fprintf(h, \"/%s/%s\", bucket, name)\n\tb, err := rsa.SignPKCS1v15(\n\t\trand.Reader,\n\t\tkey,\n\t\tcrypto.SHA256,\n\t\th.Sum(nil),\n\t)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tencoded := base64.StdEncoding.EncodeToString(b)\n\tu := &url.URL{\n\t\tScheme: \"https\",\n\t\tHost:   \"storage.googleapis.com\",\n\t\tPath:   fmt.Sprintf(\"/%s/%s\", bucket, name),\n\t}\n\tq := u.Query()\n\tq.Set(\"GoogleAccessId\", opts.GoogleAccessID)\n\tq.Set(\"Expires\", fmt.Sprintf(\"%d\", opts.Expires.Unix()))\n\tq.Set(\"Signature\", string(encoded))\n\tu.RawQuery = q.Encode()\n\treturn u.String(), nil\n}\n\n// StatObject returns meta information about the specified object.\nfunc StatObject(ctx context.Context, bucket, name string) (*Object, error) {\n\to, err := rawService(ctx).Objects.Get(bucket, name).Projection(\"full\").Do()\n\tif e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {\n\t\treturn nil, ErrObjectNotExist\n\t}\n\tif err != 
nil {\n\t\treturn nil, err\n\t}\n\treturn newObject(o), nil\n}\n\n// UpdateAttrs updates an object with the provided attributes.\n// All zero-value attributes are ignored.\nfunc UpdateAttrs(ctx context.Context, bucket, name string, attrs ObjectAttrs) (*Object, error) {\n\to, err := rawService(ctx).Objects.Patch(bucket, name, attrs.toRawObject(bucket)).Projection(\"full\").Do()\n\tif e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {\n\t\treturn nil, ErrObjectNotExist\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newObject(o), nil\n}\n\n// DeleteObject deletes the single specified object.\nfunc DeleteObject(ctx context.Context, bucket, name string) error {\n\treturn rawService(ctx).Objects.Delete(bucket, name).Do()\n}\n\n// CopyObject copies the source object to the destination.\n// The copied object's attributes are overwritten by attrs if non-nil.\nfunc CopyObject(ctx context.Context, srcBucket, srcName string, destBucket, destName string, attrs *ObjectAttrs) (*Object, error) {\n\tif srcBucket == \"\" || destBucket == \"\" {\n\t\treturn nil, errors.New(\"storage: srcBucket and destBucket must both be non-empty\")\n\t}\n\tif srcName == \"\" || destName == \"\" {\n\t\treturn nil, errors.New(\"storage: srcName and destName must be non-empty\")\n\t}\n\tvar rawObject *raw.Object\n\tif attrs != nil {\n\t\tattrs.Name = destName\n\t\tif attrs.ContentType == \"\" {\n\t\t\treturn nil, errors.New(\"storage: attrs.ContentType must be non-empty\")\n\t\t}\n\t\trawObject = attrs.toRawObject(destBucket)\n\t}\n\to, err := rawService(ctx).Objects.Copy(\n\t\tsrcBucket, srcName, destBucket, destName, rawObject).Projection(\"full\").Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newObject(o), nil\n}\n\n// NewReader creates a new io.ReadCloser to read the contents\n// of the object.\nfunc NewReader(ctx context.Context, bucket, name string) (io.ReadCloser, error) {\n\thc := internal.HTTPClient(ctx)\n\tu := &url.URL{\n\t\tScheme: 
\"https\",\n\t\tHost:   \"storage.googleapis.com\",\n\t\tPath:   fmt.Sprintf(\"/%s/%s\", bucket, name),\n\t}\n\tres, err := hc.Get(u.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode == http.StatusNotFound {\n\t\tres.Body.Close()\n\t\treturn nil, ErrObjectNotExist\n\t}\n\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\tres.Body.Close()\n\t\treturn res.Body, fmt.Errorf(\"storage: can't read object %v/%v, status code: %v\", bucket, name, res.Status)\n\t}\n\treturn res.Body, nil\n}\n\n// NewWriter returns a storage Writer that writes to the GCS object\n// identified by the specified name.\n// If such an object doesn't exist, it creates one.\n// Attributes can be set on the object by modifying the returned Writer's\n// ObjectAttrs field before the first call to Write. The name parameter to this\n// function is ignored if the Name field of the ObjectAttrs field is set to a\n// non-empty string.\n//\n// It is the caller's responsibility to call Close when writing is done.\n//\n// The object is not available and any previous object with the same\n// name is not replaced on Cloud Storage until Close is called.\nfunc NewWriter(ctx context.Context, bucket, name string) *Writer {\n\treturn &Writer{\n\t\tctx:    ctx,\n\t\tbucket: bucket,\n\t\tname:   name,\n\t\tdonec:  make(chan struct{}),\n\t}\n}\n\nfunc rawService(ctx context.Context) *raw.Service {\n\treturn internal.Service(ctx, \"storage\", func(hc *http.Client) interface{} {\n\t\tsvc, _ := raw.New(hc)\n\t\treturn svc\n\t}).(*raw.Service)\n}\n\n// parseKey converts the binary contents of a private key file\n// to an *rsa.PrivateKey. It detects whether the private key is in a\n// PEM container or not. If so, it extracts the the private key\n// from PEM container before conversion. 
It only supports PEM\n// containers with no passphrase.\nfunc parseKey(key []byte) (*rsa.PrivateKey, error) {\n\tif block, _ := pem.Decode(key); block != nil {\n\t\tkey = block.Bytes\n\t}\n\tparsedKey, err := x509.ParsePKCS8PrivateKey(key)\n\tif err != nil {\n\t\tparsedKey, err = x509.ParsePKCS1PrivateKey(key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tparsed, ok := parsedKey.(*rsa.PrivateKey)\n\tif !ok {\n\t\treturn nil, errors.New(\"oauth2: private key is invalid\")\n\t}\n\treturn parsed, nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/cloud/storage/types.go",
    "content": "// Copyright 2014 Google Inc. All Rights Reserved.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage storage\n\nimport (\n\t\"encoding/base64\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\traw \"google.golang.org/api/storage/v1\"\n)\n\n// Bucket represents a Google Cloud Storage bucket.\ntype Bucket struct {\n\t// Name is the name of the bucket.\n\tName string\n\n\t// ACL is the list of access control rules on the bucket.\n\tACL []ACLRule\n\n\t// DefaultObjectACL is the list of access controls to\n\t// apply to new objects when no object ACL is provided.\n\tDefaultObjectACL []ACLRule\n\n\t// Location is the location of the bucket. It defaults to \"US\".\n\tLocation string\n\n\t// Metageneration is the metadata generation of the bucket.\n\t// Read-only.\n\tMetageneration int64\n\n\t// StorageClass is the storage class of the bucket. This defines\n\t// how objects in the bucket are stored and determines the SLA\n\t// and the cost of storage. Typical values are \"STANDARD\" and\n\t// \"DURABLE_REDUCED_AVAILABILITY\". 
Defaults to \"STANDARD\".\n\tStorageClass string\n\n\t// Created is the creation time of the bucket.\n\t// Read-only.\n\tCreated time.Time\n}\n\nfunc newBucket(b *raw.Bucket) *Bucket {\n\tif b == nil {\n\t\treturn nil\n\t}\n\tbucket := &Bucket{\n\t\tName:           b.Name,\n\t\tLocation:       b.Location,\n\t\tMetageneration: b.Metageneration,\n\t\tStorageClass:   b.StorageClass,\n\t\tCreated:        convertTime(b.TimeCreated),\n\t}\n\tacl := make([]ACLRule, len(b.Acl))\n\tfor i, rule := range b.Acl {\n\t\tacl[i] = ACLRule{\n\t\t\tEntity: ACLEntity(rule.Entity),\n\t\t\tRole:   ACLRole(rule.Role),\n\t\t}\n\t}\n\tbucket.ACL = acl\n\tobjACL := make([]ACLRule, len(b.DefaultObjectAcl))\n\tfor i, rule := range b.DefaultObjectAcl {\n\t\tobjACL[i] = ACLRule{\n\t\t\tEntity: ACLEntity(rule.Entity),\n\t\t\tRole:   ACLRole(rule.Role),\n\t\t}\n\t}\n\tbucket.DefaultObjectACL = objACL\n\treturn bucket\n}\n\n// ObjectAttrs is the user-editable object attributes.\ntype ObjectAttrs struct {\n\t// Name is the name of the object.\n\tName string\n\n\t// ContentType is the MIME type of the object's content.\n\t// Optional.\n\tContentType string\n\n\t// ContentLanguage is the optional RFC 1766 Content-Language of\n\t// the object's content sent in response headers.\n\tContentLanguage string\n\n\t// ContentEncoding is the optional Content-Encoding of the object\n\t// sent it the response headers.\n\tContentEncoding string\n\n\t// CacheControl is the optional Cache-Control header of the object\n\t// sent in the response headers.\n\tCacheControl string\n\n\t// ACL is the list of access control rules for the object.\n\t// Optional. 
If nil or empty, existing ACL rules are preserved.\n\tACL []ACLRule\n\n\t// Metadata represents user-provided metadata, in key/value pairs.\n\t// It can be nil if the current metadata values needs to preserved.\n\tMetadata map[string]string\n}\n\nfunc (o ObjectAttrs) toRawObject(bucket string) *raw.Object {\n\tvar acl []*raw.ObjectAccessControl\n\tif len(o.ACL) > 0 {\n\t\tacl = make([]*raw.ObjectAccessControl, len(o.ACL))\n\t\tfor i, rule := range o.ACL {\n\t\t\tacl[i] = &raw.ObjectAccessControl{\n\t\t\t\tEntity: string(rule.Entity),\n\t\t\t\tRole:   string(rule.Role),\n\t\t\t}\n\t\t}\n\t}\n\treturn &raw.Object{\n\t\tBucket:          bucket,\n\t\tName:            o.Name,\n\t\tContentType:     o.ContentType,\n\t\tContentEncoding: o.ContentEncoding,\n\t\tContentLanguage: o.ContentLanguage,\n\t\tCacheControl:    o.CacheControl,\n\t\tAcl:             acl,\n\t\tMetadata:        o.Metadata,\n\t}\n}\n\n// Object represents a Google Cloud Storage (GCS) object.\ntype Object struct {\n\t// Bucket is the name of the bucket containing this GCS object.\n\tBucket string\n\n\t// Name is the name of the object within the bucket.\n\tName string\n\n\t// ContentType is the MIME type of the object's content.\n\tContentType string\n\n\t// ContentLanguage is the content language of the object's content.\n\tContentLanguage string\n\n\t// CacheControl is the Cache-Control header to be sent in the response\n\t// headers when serving the object data.\n\tCacheControl string\n\n\t// ACL is the list of access control rules for the object.\n\tACL []ACLRule\n\n\t// Owner is the owner of the object.\n\t//\n\t// If non-zero, it is in the form of \"user-<userId>\".\n\tOwner string\n\n\t// Size is the length of the object's content.\n\tSize int64\n\n\t// ContentEncoding is the encoding of the object's content.\n\tContentEncoding string\n\n\t// MD5 is the MD5 hash of the object's content.\n\tMD5 []byte\n\n\t// CRC32C is the CRC32 checksum of the object's content using\n\t// the Castagnoli93 
polynomial.\n\tCRC32C uint32\n\n\t// MediaLink is an URL to the object's content.\n\tMediaLink string\n\n\t// Metadata represents user-provided metadata, in key/value pairs.\n\t// It can be nil if no metadata is provided.\n\tMetadata map[string]string\n\n\t// Generation is the generation number of the object's content.\n\tGeneration int64\n\n\t// MetaGeneration is the version of the metadata for this\n\t// object at this generation. This field is used for preconditions\n\t// and for detecting changes in metadata. A metageneration number\n\t// is only meaningful in the context of a particular generation\n\t// of a particular object.\n\tMetaGeneration int64\n\n\t// StorageClass is the storage class of the bucket.\n\t// This value defines how objects in the bucket are stored and\n\t// determines the SLA and the cost of storage. Typical values are\n\t// \"STANDARD\" and \"DURABLE_REDUCED_AVAILABILITY\".\n\t// It defaults to \"STANDARD\".\n\tStorageClass string\n\n\t// Deleted is the time the object was deleted.\n\t// If not deleted, it is the zero value.\n\tDeleted time.Time\n\n\t// Updated is the creation or modification time of the object.\n\t// For buckets with versioning enabled, changing an object's\n\t// metadata does not change this property.\n\tUpdated time.Time\n}\n\n// convertTime converts a time in RFC3339 format to time.Time.\n// If any error occurs in parsing, the zero-value time.Time is silently returned.\nfunc convertTime(t string) time.Time {\n\tvar r time.Time\n\tif t != \"\" {\n\t\tr, _ = time.Parse(time.RFC3339, t)\n\t}\n\treturn r\n}\n\nfunc newObject(o *raw.Object) *Object {\n\tif o == nil {\n\t\treturn nil\n\t}\n\tacl := make([]ACLRule, len(o.Acl))\n\tfor i, rule := range o.Acl {\n\t\tacl[i] = ACLRule{\n\t\t\tEntity: ACLEntity(rule.Entity),\n\t\t\tRole:   ACLRole(rule.Role),\n\t\t}\n\t}\n\towner := \"\"\n\tif o.Owner != nil {\n\t\towner = o.Owner.Entity\n\t}\n\tmd5, _ := base64.StdEncoding.DecodeString(o.Md5Hash)\n\tvar crc32c uint32\n\td, err := 
base64.StdEncoding.DecodeString(o.Crc32c)\n\tif err == nil && len(d) == 4 {\n\t\tcrc32c = uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3])\n\t}\n\treturn &Object{\n\t\tBucket:          o.Bucket,\n\t\tName:            o.Name,\n\t\tContentType:     o.ContentType,\n\t\tContentLanguage: o.ContentLanguage,\n\t\tCacheControl:    o.CacheControl,\n\t\tACL:             acl,\n\t\tOwner:           owner,\n\t\tContentEncoding: o.ContentEncoding,\n\t\tSize:            int64(o.Size),\n\t\tMD5:             md5,\n\t\tCRC32C:          crc32c,\n\t\tMediaLink:       o.MediaLink,\n\t\tMetadata:        o.Metadata,\n\t\tGeneration:      o.Generation,\n\t\tMetaGeneration:  o.Metageneration,\n\t\tStorageClass:    o.StorageClass,\n\t\tDeleted:         convertTime(o.TimeDeleted),\n\t\tUpdated:         convertTime(o.Updated),\n\t}\n}\n\n// Query represents a query to filter objects from a bucket.\ntype Query struct {\n\t// Delimiter returns results in a directory-like fashion.\n\t// Results will contain only objects whose names, aside from the\n\t// prefix, do not contain delimiter. Objects whose names,\n\t// aside from the prefix, contain delimiter will have their name,\n\t// truncated after the delimiter, returned in prefixes.\n\t// Duplicate prefixes are omitted.\n\t// Optional.\n\tDelimiter string\n\n\t// Prefix is the prefix filter to query objects\n\t// whose names begin with this prefix.\n\t// Optional.\n\tPrefix string\n\n\t// Versions indicates whether multiple versions of the same\n\t// object will be included in the results.\n\tVersions bool\n\n\t// Cursor is a previously-returned page token\n\t// representing part of the larger set of results to view.\n\t// Optional.\n\tCursor string\n\n\t// MaxResults is the maximum number of items plus prefixes\n\t// to return. 
As duplicate prefixes are omitted,\n\t// fewer total results may be returned than requested.\n\t// The default page limit is used if it is negative or zero.\n\tMaxResults int\n}\n\n// Objects represents a list of objects returned from\n// a bucket look-p request and a query to retrieve more\n// objects from the next pages.\ntype Objects struct {\n\t// Results represent a list of object results.\n\tResults []*Object\n\n\t// Next is the continuation query to retrieve more\n\t// results with the same filtering criteria. If there\n\t// are no more results to retrieve, it is nil.\n\tNext *Query\n\n\t// Prefixes represents prefixes of objects\n\t// matching-but-not-listed up to and including\n\t// the requested delimiter.\n\tPrefixes []string\n}\n\n// contentTyper implements ContentTyper to enable an\n// io.ReadCloser to specify its MIME type.\ntype contentTyper struct {\n\tio.Reader\n\tt string\n}\n\nfunc (c *contentTyper) ContentType() string {\n\treturn c.t\n}\n\n// A Writer writes a Cloud Storage object.\ntype Writer struct {\n\t// ObjectAttrs are optional attributes to set on the object. Any attributes\n\t// must be initialized before the first Write call. 
Nil or zero-valued\n\t// attributes are ignored.\n\tObjectAttrs\n\n\tctx    context.Context\n\tbucket string\n\tname   string\n\n\tonce sync.Once\n\n\topened bool\n\tr      io.Reader\n\tpw     *io.PipeWriter\n\n\tdonec chan struct{} // closed after err and obj are set.\n\terr   error\n\tobj   *Object\n}\n\nfunc (w *Writer) open() {\n\tattrs := w.ObjectAttrs\n\t// Always set the name, otherwise the backend\n\t// rejects the request and responds with an HTTP 400.\n\tif attrs.Name == \"\" {\n\t\tattrs.Name = w.name\n\t}\n\tpr, pw := io.Pipe()\n\tw.r = &contentTyper{pr, attrs.ContentType}\n\tw.pw = pw\n\tw.opened = true\n\n\tgo func() {\n\t\tresp, err := rawService(w.ctx).Objects.Insert(\n\t\t\tw.bucket, attrs.toRawObject(w.bucket)).Media(w.r).Projection(\"full\").Do()\n\t\tw.err = err\n\t\tif err == nil {\n\t\t\tw.obj = newObject(resp)\n\t\t} else {\n\t\t\tpr.CloseWithError(w.err)\n\t\t}\n\t\tclose(w.donec)\n\t}()\n}\n\n// Write appends to w.\nfunc (w *Writer) Write(p []byte) (n int, err error) {\n\tif w.err != nil {\n\t\treturn 0, w.err\n\t}\n\tif !w.opened {\n\t\tw.open()\n\t}\n\treturn w.pw.Write(p)\n}\n\n// Close completes the write operation and flushes any buffered data.\n// If Close doesn't return an error, metadata about the written object\n// can be retrieved by calling Object.\nfunc (w *Writer) Close() error {\n\tif !w.opened {\n\t\tw.open()\n\t}\n\tif err := w.pw.Close(); err != nil {\n\t\treturn err\n\t}\n\t<-w.donec\n\treturn w.err\n}\n\n// Object returns metadata about a successfully-written object.\n// It's only valid to call it after Close returns nil.\nfunc (w *Writer) Object() *Object {\n\treturn w.obj\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/.travis.yml",
    "content": "sudo: false\n\nlanguage: go\n\ninstall:\n  - go get -v -t -d google.golang.org/grpc/...\n\nscript:\n  - go test -v -cpu 1,4 google.golang.org/grpc/...\n  - go test -v -race -cpu 1,4 google.golang.org/grpc/...\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/CONTRIBUTING.md",
    "content": "# How to contribute\n\nWe definitely welcome patches and contribution to grpc! Here is some guideline\nand information about how to do so.\n\n## Getting started\n\n### Legal requirements\n\nIn order to protect both you and ourselves, you will need to sign the\n[Contributor License Agreement](https://cla.developers.google.com/clas).\n\n### Filing Issues\nWhen filing an issue, make sure to answer these five questions:\n\n1. What version of Go are you using (`go version`)?\n2. What operating system and processor architecture are you using?\n3. What did you do?\n4. What did you expect to see?\n5. What did you see instead?\n\n### Contributing code\nPlease read the Contribution Guidelines before sending patches.\n\nWe will not accept GitHub pull requests once Gerrit is setup (we will use Gerrit instead for code review).\n\nUnless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file.\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/LICENSE",
    "content": "Copyright 2014, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n    * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n    * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/PATENTS",
    "content": "Additional IP Rights Grant (Patents)\n\n\"This implementation\" means the copyrightable works distributed by\nGoogle as part of the GRPC project.\n\nGoogle hereby grants to You a perpetual, worldwide, non-exclusive,\nno-charge, royalty-free, irrevocable (except as stated in this section)\npatent license to make, have made, use, offer to sell, sell, import,\ntransfer and otherwise run, modify and propagate the contents of this\nimplementation of GRPC, where such license applies only to those patent\nclaims, both currently owned or controlled by Google and acquired in\nthe future, licensable by Google that are necessarily infringed by this\nimplementation of GRPC.  This grant does not include claims that would be\ninfringed only as a consequence of further modification of this\nimplementation.  If you or your agent or exclusive licensee institute or\norder or agree to the institution of patent litigation against any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging\nthat this implementation of GRPC or any code incorporated within this\nimplementation of GRPC constitutes direct or contributory patent\ninfringement, or inducement of patent infringement, then any patent\nrights granted to you under this License for this implementation of GRPC\nshall terminate as of the date such litigation is filed.\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/README.md",
    "content": "#gRPC-Go\n\n[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)\n\nThe Go implementation of [gRPC](https://github.com/grpc/grpc)\n\nInstallation\n------------\n\nTo install this package, you need to install Go 1.4 and setup your Go workspace on your computer. The simplest way to install the library is to run:\n\n```\n$ go get google.golang.org/grpc\n```\n\nDocumentation\n-------------\nYou can find more detailed documentation and examples in the [grpc-common repository](http://github.com/grpc/grpc-common).\n\nStatus\n------\nAlpha - ready for early adopters.\n\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/benchmark/benchmark.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n/*\nPackage benchmark implements the building blocks to setup end-to-end gRPC benchmarks.\n*/\npackage benchmark\n\nimport (\n\t\"io\"\n\t\"math\"\n\t\"net\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/grpc\"\n\ttestpb \"google.golang.org/grpc/benchmark/grpc_testing\"\n\t\"google.golang.org/grpc/grpclog\"\n)\n\nfunc newPayload(t testpb.PayloadType, size int) *testpb.Payload {\n\tif size < 0 {\n\t\tgrpclog.Fatalf(\"Requested a response with invalid length %d\", size)\n\t}\n\tbody := make([]byte, size)\n\tswitch t {\n\tcase testpb.PayloadType_COMPRESSABLE:\n\tcase testpb.PayloadType_UNCOMPRESSABLE:\n\t\tgrpclog.Fatalf(\"PayloadType UNCOMPRESSABLE is not supported\")\n\tdefault:\n\t\tgrpclog.Fatalf(\"Unsupported payload type: %d\", t)\n\t}\n\treturn &testpb.Payload{\n\t\tType: t,\n\t\tBody: body,\n\t}\n}\n\ntype testServer struct {\n}\n\nfunc (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {\n\treturn &testpb.SimpleResponse{\n\t\tPayload: newPayload(in.ResponseType, int(in.ResponseSize)),\n\t}, nil\n}\n\nfunc (s *testServer) StreamingCall(stream testpb.TestService_StreamingCallServer) error {\n\tfor {\n\t\tin, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\t// read done.\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := stream.Send(&testpb.SimpleResponse{\n\t\t\tPayload: newPayload(in.ResponseType, 
int(in.ResponseSize)),\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n// StartServer starts a gRPC server serving a benchmark service. It returns its\n// listen address and a function to stop the server.\nfunc StartServer() (string, func()) {\n\tlis, err := net.Listen(\"tcp\", \":0\")\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"Failed to listen: %v\", err)\n\t}\n\ts := grpc.NewServer(grpc.MaxConcurrentStreams(math.MaxUint32))\n\ttestpb.RegisterTestServiceServer(s, &testServer{})\n\tgo s.Serve(lis)\n\treturn lis.Addr().String(), func() {\n\t\ts.Stop()\n\t}\n}\n\n// DoUnaryCall performs an unary RPC with given stub and request and response sizes.\nfunc DoUnaryCall(tc testpb.TestServiceClient, reqSize, respSize int) {\n\tpl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize)\n\treq := &testpb.SimpleRequest{\n\t\tResponseType: pl.Type,\n\t\tResponseSize: int32(respSize),\n\t\tPayload:      pl,\n\t}\n\tif _, err := tc.UnaryCall(context.Background(), req); err != nil {\n\t\tgrpclog.Fatal(\"/TestService/UnaryCall RPC failed: \", err)\n\t}\n}\n\n// DoStreamingRoundTrip performs a round trip  for a single streaming rpc.\nfunc DoStreamingRoundTrip(tc testpb.TestServiceClient, stream testpb.TestService_StreamingCallClient, reqSize, respSize int) {\n\tpl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize)\n\treq := &testpb.SimpleRequest{\n\t\tResponseType: pl.Type,\n\t\tResponseSize: int32(respSize),\n\t\tPayload:      pl,\n\t}\n\tif err := stream.Send(req); err != nil {\n\t\tgrpclog.Fatalf(\"%v.StreamingCall(_) = _, %v\", tc, err)\n\t}\n\tif _, err := stream.Recv(); err != nil {\n\t\tgrpclog.Fatal(\"%v.StreamingCall(_) = _, %v\", tc, err)\n\t}\n}\n\n// NewClientConn creates a gRPC client connection to addr.\nfunc NewClientConn(addr string) *grpc.ClientConn {\n\tconn, err := grpc.Dial(addr)\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"NewClientConn(%q) failed to create a ClientConn %v\", addr, err)\n\t}\n\treturn conn\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/benchmark/client/main.go",
    "content": "package main\n\nimport (\n\t\"flag\"\n\t\"math\"\n\t\"net\"\n\t\"net/http\"\n\t_ \"net/http/pprof\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/benchmark\"\n\ttestpb \"google.golang.org/grpc/benchmark/grpc_testing\"\n\t\"google.golang.org/grpc/benchmark/stats\"\n\t\"google.golang.org/grpc/grpclog\"\n)\n\nvar (\n\tserver            = flag.String(\"server\", \"\", \"The server address\")\n\tmaxConcurrentRPCs = flag.Int(\"max_concurrent_rpcs\", 1, \"The max number of concurrent RPCs\")\n\tduration          = flag.Int(\"duration\", math.MaxInt32, \"The duration in seconds to run the benchmark client\")\n\trpcType           = flag.Int(\"rpc_type\", 0,\n\t\t`Configure different client rpc type. Valid options are:\n\t\t   0 : unary call;\n\t\t   1 : streaming call.`)\n)\n\nfunc unaryCaller(client testpb.TestServiceClient) {\n\tbenchmark.DoUnaryCall(client, 1, 1)\n}\n\nfunc streamCaller(client testpb.TestServiceClient, stream testpb.TestService_StreamingCallClient) {\n\tbenchmark.DoStreamingRoundTrip(client, stream, 1, 1)\n}\n\nfunc buildConnection() (s *stats.Stats, conn *grpc.ClientConn, tc testpb.TestServiceClient) {\n\ts = stats.NewStats(256)\n\tconn = benchmark.NewClientConn(*server)\n\ttc = testpb.NewTestServiceClient(conn)\n\treturn s, conn, tc\n}\n\nfunc closeLoopUnary() {\n\n\ts, conn, tc := buildConnection()\n\n\tfor i := 0; i < 100; i++ {\n\t\tunaryCaller(tc)\n\t}\n\tch := make(chan int, *maxConcurrentRPCs*4)\n\tvar (\n\t\tmu sync.Mutex\n\t\twg sync.WaitGroup\n\t)\n\twg.Add(*maxConcurrentRPCs)\n\n\tfor i := 0; i < *maxConcurrentRPCs; i++ {\n\t\tgo func() {\n\t\t\tfor _ = range ch {\n\t\t\t\tstart := time.Now()\n\t\t\t\tunaryCaller(tc)\n\t\t\t\telapse := time.Since(start)\n\t\t\t\tmu.Lock()\n\t\t\t\ts.Add(elapse)\n\t\t\t\tmu.Unlock()\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\t// Stop the client when time is up.\n\tdone := make(chan struct{})\n\tgo func() 
{\n\t\t<-time.After(time.Duration(*duration) * time.Second)\n\t\tclose(done)\n\t}()\n\tok := true\n\tfor ok {\n\t\tselect {\n\t\tcase ch <- 0:\n\t\tcase <-done:\n\t\t\tok = false\n\t\t}\n\t}\n\tclose(ch)\n\twg.Wait()\n\tconn.Close()\n\tgrpclog.Println(s.String())\n\n}\n\nfunc closeLoopStream() {\n\ts, conn, tc := buildConnection()\n\tstream, err := tc.StreamingCall(context.Background())\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"%v.StreamingCall(_) = _, %v\", tc, err)\n\t}\n\tfor i := 0; i < 100; i++ {\n\t\tstreamCaller(tc, stream)\n\t}\n\tch := make(chan int, *maxConcurrentRPCs*4)\n\tvar (\n\t\tmu sync.Mutex\n\t\twg sync.WaitGroup\n\t)\n\twg.Add(*maxConcurrentRPCs)\n\t// Distribute RPCs over maxConcurrentCalls workers.\n\tfor i := 0; i < *maxConcurrentRPCs; i++ {\n\t\tgo func() {\n\t\t\tfor _ = range ch {\n\t\t\t\tstart := time.Now()\n\t\t\t\tstreamCaller(tc, stream)\n\t\t\t\telapse := time.Since(start)\n\t\t\t\tmu.Lock()\n\t\t\t\ts.Add(elapse)\n\t\t\t\tmu.Unlock()\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\t// Stop the client when time is up.\n\tdone := make(chan struct{})\n\tgo func() {\n\t\t<-time.After(time.Duration(*duration) * time.Second)\n\t\tclose(done)\n\t}()\n\tok := true\n\tfor ok {\n\t\tselect {\n\t\tcase ch <- 0:\n\t\tcase <-done:\n\t\t\tok = false\n\t\t}\n\t}\n\tclose(ch)\n\twg.Wait()\n\tconn.Close()\n\tgrpclog.Println(s.String())\n}\nfunc main() {\n\tflag.Parse()\n\n\tgo func() {\n\t\tlis, err := net.Listen(\"tcp\", \":0\")\n\t\tif err != nil {\n\t\t\tgrpclog.Fatalf(\"Failed to listen: %v\", err)\n\t\t}\n\t\tgrpclog.Println(\"Client profiling address: \", lis.Addr().String())\n\t\tif err := http.Serve(lis, nil); err != nil {\n\t\t\tgrpclog.Fatalf(\"Failed to serve: %v\", err)\n\t\t}\n\t}()\n\tswitch *rpcType {\n\tcase 0:\n\t\tcloseLoopUnary()\n\tcase 1:\n\t\tcloseLoopStream()\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/benchmark/grpc_testing/test.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: test.proto\n// DO NOT EDIT!\n\n/*\nPackage grpc_testing is a generated protocol buffer package.\n\nIt is generated from these files:\n\ttest.proto\n\nIt has these top-level messages:\n\tStatsRequest\n\tServerStats\n\tPayload\n\tHistogramData\n\tClientConfig\n\tMark\n\tClientArgs\n\tClientStats\n\tClientStatus\n\tServerConfig\n\tServerArgs\n\tServerStatus\n\tSimpleRequest\n\tSimpleResponse\n*/\npackage grpc_testing\n\nimport proto \"github.com/golang/protobuf/proto\"\n\nimport (\n\tcontext \"golang.org/x/net/context\"\n\tgrpc \"google.golang.org/grpc\"\n)\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ context.Context\nvar _ grpc.ClientConn\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\n\ntype PayloadType int32\n\nconst (\n\t// Compressable text format.\n\tPayloadType_COMPRESSABLE PayloadType = 0\n\t// Uncompressable binary format.\n\tPayloadType_UNCOMPRESSABLE PayloadType = 1\n\t// Randomly chosen from all other formats defined in this enum.\n\tPayloadType_RANDOM PayloadType = 2\n)\n\nvar PayloadType_name = map[int32]string{\n\t0: \"COMPRESSABLE\",\n\t1: \"UNCOMPRESSABLE\",\n\t2: \"RANDOM\",\n}\nvar PayloadType_value = map[string]int32{\n\t\"COMPRESSABLE\":   0,\n\t\"UNCOMPRESSABLE\": 1,\n\t\"RANDOM\":         2,\n}\n\nfunc (x PayloadType) String() string {\n\treturn proto.EnumName(PayloadType_name, int32(x))\n}\n\ntype ClientType int32\n\nconst (\n\tClientType_SYNCHRONOUS_CLIENT ClientType = 0\n\tClientType_ASYNC_CLIENT       ClientType = 1\n)\n\nvar ClientType_name = map[int32]string{\n\t0: \"SYNCHRONOUS_CLIENT\",\n\t1: \"ASYNC_CLIENT\",\n}\nvar ClientType_value = map[string]int32{\n\t\"SYNCHRONOUS_CLIENT\": 0,\n\t\"ASYNC_CLIENT\":       1,\n}\n\nfunc (x ClientType) String() string {\n\treturn proto.EnumName(ClientType_name, int32(x))\n}\n\ntype ServerType int32\n\nconst (\n\tServerType_SYNCHRONOUS_SERVER ServerType 
= 0\n\tServerType_ASYNC_SERVER       ServerType = 1\n)\n\nvar ServerType_name = map[int32]string{\n\t0: \"SYNCHRONOUS_SERVER\",\n\t1: \"ASYNC_SERVER\",\n}\nvar ServerType_value = map[string]int32{\n\t\"SYNCHRONOUS_SERVER\": 0,\n\t\"ASYNC_SERVER\":       1,\n}\n\nfunc (x ServerType) String() string {\n\treturn proto.EnumName(ServerType_name, int32(x))\n}\n\ntype RpcType int32\n\nconst (\n\tRpcType_UNARY     RpcType = 0\n\tRpcType_STREAMING RpcType = 1\n)\n\nvar RpcType_name = map[int32]string{\n\t0: \"UNARY\",\n\t1: \"STREAMING\",\n}\nvar RpcType_value = map[string]int32{\n\t\"UNARY\":     0,\n\t\"STREAMING\": 1,\n}\n\nfunc (x RpcType) String() string {\n\treturn proto.EnumName(RpcType_name, int32(x))\n}\n\ntype StatsRequest struct {\n\t// run number\n\tTestNum int32 `protobuf:\"varint,1,opt,name=test_num\" json:\"test_num,omitempty\"`\n}\n\nfunc (m *StatsRequest) Reset()         { *m = StatsRequest{} }\nfunc (m *StatsRequest) String() string { return proto.CompactTextString(m) }\nfunc (*StatsRequest) ProtoMessage()    {}\n\ntype ServerStats struct {\n\t// wall clock time\n\tTimeElapsed float64 `protobuf:\"fixed64,1,opt,name=time_elapsed\" json:\"time_elapsed,omitempty\"`\n\t// user time used by the server process and threads\n\tTimeUser float64 `protobuf:\"fixed64,2,opt,name=time_user\" json:\"time_user,omitempty\"`\n\t// server time used by the server process and all threads\n\tTimeSystem float64 `protobuf:\"fixed64,3,opt,name=time_system\" json:\"time_system,omitempty\"`\n}\n\nfunc (m *ServerStats) Reset()         { *m = ServerStats{} }\nfunc (m *ServerStats) String() string { return proto.CompactTextString(m) }\nfunc (*ServerStats) ProtoMessage()    {}\n\ntype Payload struct {\n\t// The type of data in body.\n\tType PayloadType `protobuf:\"varint,1,opt,name=type,enum=grpc.testing.PayloadType\" json:\"type,omitempty\"`\n\t// Primary contents of payload.\n\tBody []byte `protobuf:\"bytes,2,opt,name=body,proto3\" json:\"body,omitempty\"`\n}\n\nfunc (m *Payload) 
Reset()         { *m = Payload{} }\nfunc (m *Payload) String() string { return proto.CompactTextString(m) }\nfunc (*Payload) ProtoMessage()    {}\n\ntype HistogramData struct {\n\tBucket       []uint32 `protobuf:\"varint,1,rep,name=bucket\" json:\"bucket,omitempty\"`\n\tMinSeen      float64  `protobuf:\"fixed64,2,opt,name=min_seen\" json:\"min_seen,omitempty\"`\n\tMaxSeen      float64  `protobuf:\"fixed64,3,opt,name=max_seen\" json:\"max_seen,omitempty\"`\n\tSum          float64  `protobuf:\"fixed64,4,opt,name=sum\" json:\"sum,omitempty\"`\n\tSumOfSquares float64  `protobuf:\"fixed64,5,opt,name=sum_of_squares\" json:\"sum_of_squares,omitempty\"`\n\tCount        float64  `protobuf:\"fixed64,6,opt,name=count\" json:\"count,omitempty\"`\n}\n\nfunc (m *HistogramData) Reset()         { *m = HistogramData{} }\nfunc (m *HistogramData) String() string { return proto.CompactTextString(m) }\nfunc (*HistogramData) ProtoMessage()    {}\n\ntype ClientConfig struct {\n\tServerTargets             []string   `protobuf:\"bytes,1,rep,name=server_targets\" json:\"server_targets,omitempty\"`\n\tClientType                ClientType `protobuf:\"varint,2,opt,name=client_type,enum=grpc.testing.ClientType\" json:\"client_type,omitempty\"`\n\tEnableSsl                 bool       `protobuf:\"varint,3,opt,name=enable_ssl\" json:\"enable_ssl,omitempty\"`\n\tOutstandingRpcsPerChannel int32      `protobuf:\"varint,4,opt,name=outstanding_rpcs_per_channel\" json:\"outstanding_rpcs_per_channel,omitempty\"`\n\tClientChannels            int32      `protobuf:\"varint,5,opt,name=client_channels\" json:\"client_channels,omitempty\"`\n\tPayloadSize               int32      `protobuf:\"varint,6,opt,name=payload_size\" json:\"payload_size,omitempty\"`\n\t// only for async client:\n\tAsyncClientThreads int32   `protobuf:\"varint,7,opt,name=async_client_threads\" json:\"async_client_threads,omitempty\"`\n\tRpcType            RpcType `protobuf:\"varint,8,opt,name=rpc_type,enum=grpc.testing.RpcType\" 
json:\"rpc_type,omitempty\"`\n}\n\nfunc (m *ClientConfig) Reset()         { *m = ClientConfig{} }\nfunc (m *ClientConfig) String() string { return proto.CompactTextString(m) }\nfunc (*ClientConfig) ProtoMessage()    {}\n\n// Request current stats\ntype Mark struct {\n}\n\nfunc (m *Mark) Reset()         { *m = Mark{} }\nfunc (m *Mark) String() string { return proto.CompactTextString(m) }\nfunc (*Mark) ProtoMessage()    {}\n\ntype ClientArgs struct {\n\tSetup *ClientConfig `protobuf:\"bytes,1,opt,name=setup\" json:\"setup,omitempty\"`\n\tMark  *Mark         `protobuf:\"bytes,2,opt,name=mark\" json:\"mark,omitempty\"`\n}\n\nfunc (m *ClientArgs) Reset()         { *m = ClientArgs{} }\nfunc (m *ClientArgs) String() string { return proto.CompactTextString(m) }\nfunc (*ClientArgs) ProtoMessage()    {}\n\nfunc (m *ClientArgs) GetSetup() *ClientConfig {\n\tif m != nil {\n\t\treturn m.Setup\n\t}\n\treturn nil\n}\n\nfunc (m *ClientArgs) GetMark() *Mark {\n\tif m != nil {\n\t\treturn m.Mark\n\t}\n\treturn nil\n}\n\ntype ClientStats struct {\n\tLatencies   *HistogramData `protobuf:\"bytes,1,opt,name=latencies\" json:\"latencies,omitempty\"`\n\tTimeElapsed float64        `protobuf:\"fixed64,3,opt,name=time_elapsed\" json:\"time_elapsed,omitempty\"`\n\tTimeUser    float64        `protobuf:\"fixed64,4,opt,name=time_user\" json:\"time_user,omitempty\"`\n\tTimeSystem  float64        `protobuf:\"fixed64,5,opt,name=time_system\" json:\"time_system,omitempty\"`\n}\n\nfunc (m *ClientStats) Reset()         { *m = ClientStats{} }\nfunc (m *ClientStats) String() string { return proto.CompactTextString(m) }\nfunc (*ClientStats) ProtoMessage()    {}\n\nfunc (m *ClientStats) GetLatencies() *HistogramData {\n\tif m != nil {\n\t\treturn m.Latencies\n\t}\n\treturn nil\n}\n\ntype ClientStatus struct {\n\tStats *ClientStats `protobuf:\"bytes,1,opt,name=stats\" json:\"stats,omitempty\"`\n}\n\nfunc (m *ClientStatus) Reset()         { *m = ClientStatus{} }\nfunc (m *ClientStatus) String() string { 
return proto.CompactTextString(m) }\nfunc (*ClientStatus) ProtoMessage()    {}\n\nfunc (m *ClientStatus) GetStats() *ClientStats {\n\tif m != nil {\n\t\treturn m.Stats\n\t}\n\treturn nil\n}\n\ntype ServerConfig struct {\n\tServerType ServerType `protobuf:\"varint,1,opt,name=server_type,enum=grpc.testing.ServerType\" json:\"server_type,omitempty\"`\n\tThreads    int32      `protobuf:\"varint,2,opt,name=threads\" json:\"threads,omitempty\"`\n\tEnableSsl  bool       `protobuf:\"varint,3,opt,name=enable_ssl\" json:\"enable_ssl,omitempty\"`\n}\n\nfunc (m *ServerConfig) Reset()         { *m = ServerConfig{} }\nfunc (m *ServerConfig) String() string { return proto.CompactTextString(m) }\nfunc (*ServerConfig) ProtoMessage()    {}\n\ntype ServerArgs struct {\n\tSetup *ServerConfig `protobuf:\"bytes,1,opt,name=setup\" json:\"setup,omitempty\"`\n\tMark  *Mark         `protobuf:\"bytes,2,opt,name=mark\" json:\"mark,omitempty\"`\n}\n\nfunc (m *ServerArgs) Reset()         { *m = ServerArgs{} }\nfunc (m *ServerArgs) String() string { return proto.CompactTextString(m) }\nfunc (*ServerArgs) ProtoMessage()    {}\n\nfunc (m *ServerArgs) GetSetup() *ServerConfig {\n\tif m != nil {\n\t\treturn m.Setup\n\t}\n\treturn nil\n}\n\nfunc (m *ServerArgs) GetMark() *Mark {\n\tif m != nil {\n\t\treturn m.Mark\n\t}\n\treturn nil\n}\n\ntype ServerStatus struct {\n\tStats *ServerStats `protobuf:\"bytes,1,opt,name=stats\" json:\"stats,omitempty\"`\n\tPort  int32        `protobuf:\"varint,2,opt,name=port\" json:\"port,omitempty\"`\n}\n\nfunc (m *ServerStatus) Reset()         { *m = ServerStatus{} }\nfunc (m *ServerStatus) String() string { return proto.CompactTextString(m) }\nfunc (*ServerStatus) ProtoMessage()    {}\n\nfunc (m *ServerStatus) GetStats() *ServerStats {\n\tif m != nil {\n\t\treturn m.Stats\n\t}\n\treturn nil\n}\n\ntype SimpleRequest struct {\n\t// Desired payload type in the response from the server.\n\t// If response_type is RANDOM, server randomly chooses one from other 
formats.\n\tResponseType PayloadType `protobuf:\"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType\" json:\"response_type,omitempty\"`\n\t// Desired payload size in the response from the server.\n\t// If response_type is COMPRESSABLE, this denotes the size before compression.\n\tResponseSize int32 `protobuf:\"varint,2,opt,name=response_size\" json:\"response_size,omitempty\"`\n\t// Optional input payload sent along with the request.\n\tPayload *Payload `protobuf:\"bytes,3,opt,name=payload\" json:\"payload,omitempty\"`\n}\n\nfunc (m *SimpleRequest) Reset()         { *m = SimpleRequest{} }\nfunc (m *SimpleRequest) String() string { return proto.CompactTextString(m) }\nfunc (*SimpleRequest) ProtoMessage()    {}\n\nfunc (m *SimpleRequest) GetPayload() *Payload {\n\tif m != nil {\n\t\treturn m.Payload\n\t}\n\treturn nil\n}\n\ntype SimpleResponse struct {\n\tPayload *Payload `protobuf:\"bytes,1,opt,name=payload\" json:\"payload,omitempty\"`\n}\n\nfunc (m *SimpleResponse) Reset()         { *m = SimpleResponse{} }\nfunc (m *SimpleResponse) String() string { return proto.CompactTextString(m) }\nfunc (*SimpleResponse) ProtoMessage()    {}\n\nfunc (m *SimpleResponse) GetPayload() *Payload {\n\tif m != nil {\n\t\treturn m.Payload\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tproto.RegisterEnum(\"grpc.testing.PayloadType\", PayloadType_name, PayloadType_value)\n\tproto.RegisterEnum(\"grpc.testing.ClientType\", ClientType_name, ClientType_value)\n\tproto.RegisterEnum(\"grpc.testing.ServerType\", ServerType_name, ServerType_value)\n\tproto.RegisterEnum(\"grpc.testing.RpcType\", RpcType_name, RpcType_value)\n}\n\n// Client API for TestService service\n\ntype TestServiceClient interface {\n\t// One request followed by one response.\n\t// The server returns the client payload as-is.\n\tUnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error)\n\t// One request followed by one response.\n\t// The server returns the client payload 
as-is.\n\tStreamingCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingCallClient, error)\n}\n\ntype testServiceClient struct {\n\tcc *grpc.ClientConn\n}\n\nfunc NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient {\n\treturn &testServiceClient{cc}\n}\n\nfunc (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) {\n\tout := new(SimpleResponse)\n\terr := grpc.Invoke(ctx, \"/grpc.testing.TestService/UnaryCall\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *testServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingCallClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, \"/grpc.testing.TestService/StreamingCall\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &testServiceStreamingCallClient{stream}\n\treturn x, nil\n}\n\ntype TestService_StreamingCallClient interface {\n\tSend(*SimpleRequest) error\n\tRecv() (*SimpleResponse, error)\n\tgrpc.ClientStream\n}\n\ntype testServiceStreamingCallClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *testServiceStreamingCallClient) Send(m *SimpleRequest) error {\n\treturn x.ClientStream.SendMsg(m)\n}\n\nfunc (x *testServiceStreamingCallClient) Recv() (*SimpleResponse, error) {\n\tm := new(SimpleResponse)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n// Server API for TestService service\n\ntype TestServiceServer interface {\n\t// One request followed by one response.\n\t// The server returns the client payload as-is.\n\tUnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error)\n\t// One request followed by one response.\n\t// The server returns the client payload as-is.\n\tStreamingCall(TestService_StreamingCallServer) error\n}\n\nfunc RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) 
{\n\ts.RegisterService(&_TestService_serviceDesc, srv)\n}\n\nfunc _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(SimpleRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(TestServiceServer).UnaryCall(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _TestService_StreamingCall_Handler(srv interface{}, stream grpc.ServerStream) error {\n\treturn srv.(TestServiceServer).StreamingCall(&testServiceStreamingCallServer{stream})\n}\n\ntype TestService_StreamingCallServer interface {\n\tSend(*SimpleResponse) error\n\tRecv() (*SimpleRequest, error)\n\tgrpc.ServerStream\n}\n\ntype testServiceStreamingCallServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *testServiceStreamingCallServer) Send(m *SimpleResponse) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc (x *testServiceStreamingCallServer) Recv() (*SimpleRequest, error) {\n\tm := new(SimpleRequest)\n\tif err := x.ServerStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nvar _TestService_serviceDesc = grpc.ServiceDesc{\n\tServiceName: \"grpc.testing.TestService\",\n\tHandlerType: (*TestServiceServer)(nil),\n\tMethods: []grpc.MethodDesc{\n\t\t{\n\t\t\tMethodName: \"UnaryCall\",\n\t\t\tHandler:    _TestService_UnaryCall_Handler,\n\t\t},\n\t},\n\tStreams: []grpc.StreamDesc{\n\t\t{\n\t\t\tStreamName:    \"StreamingCall\",\n\t\t\tHandler:       _TestService_StreamingCall_Handler,\n\t\t\tServerStreams: true,\n\t\t\tClientStreams: true,\n\t\t},\n\t},\n}\n\n// Client API for Worker service\n\ntype WorkerClient interface {\n\t// Start test with specified workload\n\tRunTest(ctx context.Context, opts ...grpc.CallOption) (Worker_RunTestClient, error)\n\t// Start test with specified workload\n\tRunServer(ctx context.Context, opts ...grpc.CallOption) (Worker_RunServerClient, error)\n}\n\ntype workerClient struct {\n\tcc 
*grpc.ClientConn\n}\n\nfunc NewWorkerClient(cc *grpc.ClientConn) WorkerClient {\n\treturn &workerClient{cc}\n}\n\nfunc (c *workerClient) RunTest(ctx context.Context, opts ...grpc.CallOption) (Worker_RunTestClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_Worker_serviceDesc.Streams[0], c.cc, \"/grpc.testing.Worker/RunTest\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &workerRunTestClient{stream}\n\treturn x, nil\n}\n\ntype Worker_RunTestClient interface {\n\tSend(*ClientArgs) error\n\tRecv() (*ClientStatus, error)\n\tgrpc.ClientStream\n}\n\ntype workerRunTestClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *workerRunTestClient) Send(m *ClientArgs) error {\n\treturn x.ClientStream.SendMsg(m)\n}\n\nfunc (x *workerRunTestClient) Recv() (*ClientStatus, error) {\n\tm := new(ClientStatus)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc (c *workerClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (Worker_RunServerClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_Worker_serviceDesc.Streams[1], c.cc, \"/grpc.testing.Worker/RunServer\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &workerRunServerClient{stream}\n\treturn x, nil\n}\n\ntype Worker_RunServerClient interface {\n\tSend(*ServerArgs) error\n\tRecv() (*ServerStatus, error)\n\tgrpc.ClientStream\n}\n\ntype workerRunServerClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *workerRunServerClient) Send(m *ServerArgs) error {\n\treturn x.ClientStream.SendMsg(m)\n}\n\nfunc (x *workerRunServerClient) Recv() (*ServerStatus, error) {\n\tm := new(ServerStatus)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n// Server API for Worker service\n\ntype WorkerServer interface {\n\t// Start test with specified workload\n\tRunTest(Worker_RunTestServer) error\n\t// Start test with specified workload\n\tRunServer(Worker_RunServerServer) 
error\n}\n\nfunc RegisterWorkerServer(s *grpc.Server, srv WorkerServer) {\n\ts.RegisterService(&_Worker_serviceDesc, srv)\n}\n\nfunc _Worker_RunTest_Handler(srv interface{}, stream grpc.ServerStream) error {\n\treturn srv.(WorkerServer).RunTest(&workerRunTestServer{stream})\n}\n\ntype Worker_RunTestServer interface {\n\tSend(*ClientStatus) error\n\tRecv() (*ClientArgs, error)\n\tgrpc.ServerStream\n}\n\ntype workerRunTestServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *workerRunTestServer) Send(m *ClientStatus) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc (x *workerRunTestServer) Recv() (*ClientArgs, error) {\n\tm := new(ClientArgs)\n\tif err := x.ServerStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc _Worker_RunServer_Handler(srv interface{}, stream grpc.ServerStream) error {\n\treturn srv.(WorkerServer).RunServer(&workerRunServerServer{stream})\n}\n\ntype Worker_RunServerServer interface {\n\tSend(*ServerStatus) error\n\tRecv() (*ServerArgs, error)\n\tgrpc.ServerStream\n}\n\ntype workerRunServerServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *workerRunServerServer) Send(m *ServerStatus) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc (x *workerRunServerServer) Recv() (*ServerArgs, error) {\n\tm := new(ServerArgs)\n\tif err := x.ServerStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nvar _Worker_serviceDesc = grpc.ServiceDesc{\n\tServiceName: \"grpc.testing.Worker\",\n\tHandlerType: (*WorkerServer)(nil),\n\tMethods:     []grpc.MethodDesc{},\n\tStreams: []grpc.StreamDesc{\n\t\t{\n\t\t\tStreamName:    \"RunTest\",\n\t\t\tHandler:       _Worker_RunTest_Handler,\n\t\t\tServerStreams: true,\n\t\t\tClientStreams: true,\n\t\t},\n\t\t{\n\t\t\tStreamName:    \"RunServer\",\n\t\t\tHandler:       _Worker_RunServer_Handler,\n\t\t\tServerStreams: true,\n\t\t\tClientStreams: true,\n\t\t},\n\t},\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/benchmark/grpc_testing/test.proto",
    "content": "// An integration test service that covers all the method signature permutations\n// of unary/streaming requests/responses.\nsyntax = \"proto3\";\n\npackage grpc.testing;\n\nenum PayloadType {\n    // Compressable text format.\n    COMPRESSABLE = 0;\n\n    // Uncompressable binary format.\n    UNCOMPRESSABLE = 1;\n\n    // Randomly chosen from all other formats defined in this enum.\n    RANDOM = 2;\n}\n\nmessage StatsRequest {\n    // run number\n    optional int32 test_num = 1;\n}\n\nmessage ServerStats {\n    // wall clock time\n    double time_elapsed = 1;\n\n    // user time used by the server process and threads\n    double time_user = 2;\n\n    // server time used by the server process and all threads\n    double time_system = 3;\n}\n\nmessage Payload {\n    // The type of data in body.\n    PayloadType type = 1;\n    // Primary contents of payload.\n    bytes body = 2;\n}\n\nmessage HistogramData {\n    repeated uint32 bucket = 1;\n    double min_seen = 2;\n    double max_seen = 3;\n    double sum = 4;\n    double sum_of_squares = 5;\n    double count = 6;\n}\n\nenum ClientType {\n    SYNCHRONOUS_CLIENT = 0;\n    ASYNC_CLIENT = 1;\n}\n\nenum ServerType {\n    SYNCHRONOUS_SERVER = 0;\n    ASYNC_SERVER = 1;\n}\n\nenum RpcType {\n    UNARY = 0;\n    STREAMING = 1;\n}\n\nmessage ClientConfig {\n    repeated string server_targets = 1;\n    ClientType client_type = 2;\n    bool enable_ssl = 3;\n    int32 outstanding_rpcs_per_channel = 4;\n    int32 client_channels = 5;\n    int32 payload_size = 6;\n    // only for async client:\n    int32 async_client_threads = 7;\n    RpcType rpc_type = 8;\n}\n\n// Request current stats\nmessage Mark {}\n\nmessage ClientArgs {\n    oneof argtype {\n        ClientConfig setup = 1;\n        Mark mark = 2;\n    }\n}\n\nmessage ClientStats {\n    HistogramData latencies = 1;\n    double time_elapsed = 3;\n    double time_user = 4;\n    double time_system = 5;\n}\n\nmessage ClientStatus {\n    ClientStats stats = 
1;\n}\n\nmessage ServerConfig {\n    ServerType server_type = 1;\n    int32 threads = 2;\n    bool enable_ssl = 3;\n}\n\nmessage ServerArgs {\n    oneof argtype {\n        ServerConfig setup = 1;\n        Mark mark = 2;\n    }\n}\n\nmessage ServerStatus {\n    ServerStats stats = 1;\n    int32 port = 2;\n}\n\nmessage SimpleRequest {\n    // Desired payload type in the response from the server.\n    // If response_type is RANDOM, server randomly chooses one from other formats.\n    PayloadType response_type = 1;\n\n    // Desired payload size in the response from the server.\n    // If response_type is COMPRESSABLE, this denotes the size before compression.\n    int32 response_size = 2;\n\n    // Optional input payload sent along with the request.\n    Payload payload = 3;\n}\n\nmessage SimpleResponse {\n    Payload payload = 1;\n}\n\nservice TestService {\n    // One request followed by one response.\n    // The server returns the client payload as-is.\n    rpc UnaryCall(SimpleRequest) returns (SimpleResponse);\n\n    // One request followed by one response.\n    // The server returns the client payload as-is.\n    rpc StreamingCall(stream SimpleRequest) returns (stream SimpleResponse);\n}\n\nservice Worker {\n    // Start test with specified workload\n    rpc RunTest(stream ClientArgs) returns (stream ClientStatus);\n    // Start test with specified workload\n    rpc RunServer(stream ServerArgs) returns (stream ServerStatus);\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/benchmark/server/main.go",
    "content": "package main\n\nimport (\n\t\"flag\"\n\t\"math\"\n\t\"net\"\n\t\"net/http\"\n\t_ \"net/http/pprof\"\n\t\"time\"\n\n\t\"google.golang.org/grpc/benchmark\"\n\t\"google.golang.org/grpc/grpclog\"\n)\n\nvar (\n\tduration = flag.Int(\"duration\", math.MaxInt32, \"The duration in seconds to run the benchmark server\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tgo func() {\n\t\tlis, err := net.Listen(\"tcp\", \":0\")\n\t\tif err != nil {\n\t\t\tgrpclog.Fatalf(\"Failed to listen: %v\", err)\n\t\t}\n\t\tgrpclog.Println(\"Server profiling address: \", lis.Addr().String())\n\t\tif err := http.Serve(lis, nil); err != nil {\n\t\t\tgrpclog.Fatalf(\"Failed to serve: %v\", err)\n\t\t}\n\t}()\n\taddr, stopper := benchmark.StartServer()\n\tgrpclog.Println(\"Server Address: \", addr)\n\t<-time.After(time.Duration(*duration) * time.Second)\n\tstopper()\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/benchmark/stats/counter.go",
    "content": "package stats\n\nimport (\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\t// TimeNow is used for testing.\n\tTimeNow = time.Now\n)\n\nconst (\n\thour       = 0\n\ttenminutes = 1\n\tminute     = 2\n)\n\n// Counter is a counter that keeps track of its recent values over a given\n// period of time, and with a given resolution. Use newCounter() to instantiate.\ntype Counter struct {\n\tmu         sync.RWMutex\n\tts         [3]*timeseries\n\tlastUpdate time.Time\n}\n\n// newCounter returns a new Counter.\nfunc newCounter() *Counter {\n\tnow := TimeNow()\n\tc := &Counter{}\n\tc.ts[hour] = newTimeSeries(now, time.Hour, time.Minute)\n\tc.ts[tenminutes] = newTimeSeries(now, 10*time.Minute, 10*time.Second)\n\tc.ts[minute] = newTimeSeries(now, time.Minute, time.Second)\n\treturn c\n}\n\nfunc (c *Counter) advance() time.Time {\n\tnow := TimeNow()\n\tfor _, ts := range c.ts {\n\t\tts.advanceTime(now)\n\t}\n\treturn now\n}\n\n// Value returns the current value of the counter.\nfunc (c *Counter) Value() int64 {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn c.ts[minute].headValue()\n}\n\n// LastUpdate returns the last update time of the counter.\nfunc (c *Counter) LastUpdate() time.Time {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn c.lastUpdate\n}\n\n// Set updates the current value of the counter.\nfunc (c *Counter) Set(value int64) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tc.lastUpdate = c.advance()\n\tfor _, ts := range c.ts {\n\t\tts.set(value)\n\t}\n}\n\n// Incr increments the current value of the counter by 'delta'.\nfunc (c *Counter) Incr(delta int64) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tc.lastUpdate = c.advance()\n\tfor _, ts := range c.ts {\n\t\tts.incr(delta)\n\t}\n}\n\n// Delta1h returns the delta for the last hour.\nfunc (c *Counter) Delta1h() int64 {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\tc.advance()\n\treturn c.ts[hour].delta()\n}\n\n// Delta10m returns the delta for the last 10 minutes.\nfunc (c *Counter) Delta10m() int64 
{\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\tc.advance()\n\treturn c.ts[tenminutes].delta()\n}\n\n// Delta1m returns the delta for the last minute.\nfunc (c *Counter) Delta1m() int64 {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\tc.advance()\n\treturn c.ts[minute].delta()\n}\n\n// Rate1h returns the rate of change of the counter in the last hour.\nfunc (c *Counter) Rate1h() float64 {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\tc.advance()\n\treturn c.ts[hour].rate()\n}\n\n// Rate10m returns the rate of change of the counter in the last 10 minutes.\nfunc (c *Counter) Rate10m() float64 {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\tc.advance()\n\treturn c.ts[tenminutes].rate()\n}\n\n// Rate1m returns the rate of change of the counter in the last minute.\nfunc (c *Counter) Rate1m() float64 {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\tc.advance()\n\treturn c.ts[minute].rate()\n}\n\n// Reset resets the counter to an empty state.\nfunc (c *Counter) Reset() {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tnow := TimeNow()\n\tfor _, ts := range c.ts {\n\t\tts.reset(now)\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/benchmark/stats/histogram.go",
    "content": "package stats\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n// HistogramValue is the value of Histogram objects.\ntype HistogramValue struct {\n\t// Count is the total number of values added to the histogram.\n\tCount int64\n\t// Sum is the sum of all the values added to the histogram.\n\tSum int64\n\t// Min is the minimum of all the values added to the histogram.\n\tMin int64\n\t// Max is the maximum of all the values added to the histogram.\n\tMax int64\n\t// Buckets contains all the buckets of the histogram.\n\tBuckets []HistogramBucket\n}\n\n// HistogramBucket is one histogram bucket.\ntype HistogramBucket struct {\n\t// LowBound is the lower bound of the bucket.\n\tLowBound int64\n\t// Count is the number of values in the bucket.\n\tCount int64\n}\n\n// Print writes textual output of the histogram values.\nfunc (v HistogramValue) Print(w io.Writer) {\n\tavg := float64(v.Sum) / float64(v.Count)\n\tfmt.Fprintf(w, \"Count: %d  Min: %d  Max: %d  Avg: %.2f\\n\", v.Count, v.Min, v.Max, avg)\n\tfmt.Fprintf(w, \"%s\\n\", strings.Repeat(\"-\", 60))\n\tif v.Count <= 0 {\n\t\treturn\n\t}\n\n\tmaxBucketDigitLen := len(strconv.FormatInt(v.Buckets[len(v.Buckets)-1].LowBound, 10))\n\tif maxBucketDigitLen < 3 {\n\t\t// For \"inf\".\n\t\tmaxBucketDigitLen = 3\n\t}\n\tmaxCountDigitLen := len(strconv.FormatInt(v.Count, 10))\n\tpercentMulti := 100 / float64(v.Count)\n\n\taccCount := int64(0)\n\tfor i, b := range v.Buckets {\n\t\tfmt.Fprintf(w, \"[%*d, \", maxBucketDigitLen, b.LowBound)\n\t\tif i+1 < len(v.Buckets) {\n\t\t\tfmt.Fprintf(w, \"%*d)\", maxBucketDigitLen, v.Buckets[i+1].LowBound)\n\t\t} else {\n\t\t\tfmt.Fprintf(w, \"%*s)\", maxBucketDigitLen, \"inf\")\n\t\t}\n\n\t\taccCount += b.Count\n\t\tfmt.Fprintf(w, \"  %*d  %5.1f%%  %5.1f%%\", maxCountDigitLen, b.Count, float64(b.Count)*percentMulti, float64(accCount)*percentMulti)\n\n\t\tconst barScale = 0.1\n\t\tbarLength := 
int(float64(b.Count)*percentMulti*barScale + 0.5)\n\t\tfmt.Fprintf(w, \"  %s\\n\", strings.Repeat(\"#\", barLength))\n\t}\n}\n\n// String returns the textual output of the histogram values as string.\nfunc (v HistogramValue) String() string {\n\tvar b bytes.Buffer\n\tv.Print(&b)\n\treturn b.String()\n}\n\n// A Histogram accumulates values in the form of a histogram. The type of the\n// values is int64, which is suitable for keeping track of things like RPC\n// latency in milliseconds. New histogram objects should be obtained via the\n// New() function.\ntype Histogram struct {\n\topts    HistogramOptions\n\tbuckets []bucketInternal\n\tcount   *Counter\n\tsum     *Counter\n\ttracker *Tracker\n}\n\n// HistogramOptions contains the parameters that define the histogram's buckets.\ntype HistogramOptions struct {\n\t// NumBuckets is the number of buckets.\n\tNumBuckets int\n\t// GrowthFactor is the growth factor of the buckets. A value of 0.1\n\t// indicates that bucket N+1 will be 10% larger than bucket N.\n\tGrowthFactor float64\n\t// SmallestBucketSize is the size of the first bucket. 
Bucket sizes are\n\t// rounded down to the nearest integer.\n\tSmallestBucketSize float64\n\t// MinValue is the lower bound of the first bucket.\n\tMinValue int64\n}\n\n// bucketInternal is the internal representation of a bucket, which includes a\n// rate counter.\ntype bucketInternal struct {\n\tlowBound int64\n\tcount    *Counter\n}\n\n// NewHistogram returns a pointer to a new Histogram object that was created\n// with the provided options.\nfunc NewHistogram(opts HistogramOptions) *Histogram {\n\tif opts.NumBuckets == 0 {\n\t\topts.NumBuckets = 32\n\t}\n\tif opts.SmallestBucketSize == 0.0 {\n\t\topts.SmallestBucketSize = 1.0\n\t}\n\th := Histogram{\n\t\topts:    opts,\n\t\tbuckets: make([]bucketInternal, opts.NumBuckets),\n\t\tcount:   newCounter(),\n\t\tsum:     newCounter(),\n\t\ttracker: newTracker(),\n\t}\n\tlow := opts.MinValue\n\tdelta := opts.SmallestBucketSize\n\tfor i := 0; i < opts.NumBuckets; i++ {\n\t\th.buckets[i].lowBound = low\n\t\th.buckets[i].count = newCounter()\n\t\tlow = low + int64(delta)\n\t\tdelta = delta * (1.0 + opts.GrowthFactor)\n\t}\n\treturn &h\n}\n\n// Opts returns a copy of the options used to create the Histogram.\nfunc (h *Histogram) Opts() HistogramOptions {\n\treturn h.opts\n}\n\n// Add adds a value to the histogram.\nfunc (h *Histogram) Add(value int64) error {\n\tbucket, err := h.findBucket(value)\n\tif err != nil {\n\t\treturn err\n\t}\n\th.buckets[bucket].count.Incr(1)\n\th.count.Incr(1)\n\th.sum.Incr(value)\n\th.tracker.Push(value)\n\treturn nil\n}\n\n// LastUpdate returns the time at which the object was last updated.\nfunc (h *Histogram) LastUpdate() time.Time {\n\treturn h.count.LastUpdate()\n}\n\n// Value returns the accumulated state of the histogram since it was created.\nfunc (h *Histogram) Value() HistogramValue {\n\tb := make([]HistogramBucket, len(h.buckets))\n\tfor i, v := range h.buckets {\n\t\tb[i] = HistogramBucket{\n\t\t\tLowBound: v.lowBound,\n\t\t\tCount:    v.count.Value(),\n\t\t}\n\t}\n\n\tv := 
HistogramValue{\n\t\tCount:   h.count.Value(),\n\t\tSum:     h.sum.Value(),\n\t\tMin:     h.tracker.Min(),\n\t\tMax:     h.tracker.Max(),\n\t\tBuckets: b,\n\t}\n\treturn v\n}\n\n// Delta1h returns the change in the last hour.\nfunc (h *Histogram) Delta1h() HistogramValue {\n\tb := make([]HistogramBucket, len(h.buckets))\n\tfor i, v := range h.buckets {\n\t\tb[i] = HistogramBucket{\n\t\t\tLowBound: v.lowBound,\n\t\t\tCount:    v.count.Delta1h(),\n\t\t}\n\t}\n\n\tv := HistogramValue{\n\t\tCount:   h.count.Delta1h(),\n\t\tSum:     h.sum.Delta1h(),\n\t\tMin:     h.tracker.Min1h(),\n\t\tMax:     h.tracker.Max1h(),\n\t\tBuckets: b,\n\t}\n\treturn v\n}\n\n// Delta10m returns the change in the last 10 minutes.\nfunc (h *Histogram) Delta10m() HistogramValue {\n\tb := make([]HistogramBucket, len(h.buckets))\n\tfor i, v := range h.buckets {\n\t\tb[i] = HistogramBucket{\n\t\t\tLowBound: v.lowBound,\n\t\t\tCount:    v.count.Delta10m(),\n\t\t}\n\t}\n\n\tv := HistogramValue{\n\t\tCount:   h.count.Delta10m(),\n\t\tSum:     h.sum.Delta10m(),\n\t\tMin:     h.tracker.Min10m(),\n\t\tMax:     h.tracker.Max10m(),\n\t\tBuckets: b,\n\t}\n\treturn v\n}\n\n// Delta1m returns the change in the last minute.\nfunc (h *Histogram) Delta1m() HistogramValue {\n\tb := make([]HistogramBucket, len(h.buckets))\n\tfor i, v := range h.buckets {\n\t\tb[i] = HistogramBucket{\n\t\t\tLowBound: v.lowBound,\n\t\t\tCount:    v.count.Delta1m(),\n\t\t}\n\t}\n\n\tv := HistogramValue{\n\t\tCount:   h.count.Delta1m(),\n\t\tSum:     h.sum.Delta1m(),\n\t\tMin:     h.tracker.Min1m(),\n\t\tMax:     h.tracker.Max1m(),\n\t\tBuckets: b,\n\t}\n\treturn v\n}\n\n// findBucket does a binary search to find in which bucket the value goes.\nfunc (h *Histogram) findBucket(value int64) (int, error) {\n\tlastBucket := len(h.buckets) - 1\n\tmin, max := 0, lastBucket\n\tfor max >= min {\n\t\tb := (min + max) / 2\n\t\tif value >= h.buckets[b].lowBound && (b == lastBucket || value < h.buckets[b+1].lowBound) {\n\t\t\treturn b, 
nil\n\t\t}\n\t\tif value < h.buckets[b].lowBound {\n\t\t\tmax = b - 1\n\t\t\tcontinue\n\t\t}\n\t\tmin = b + 1\n\t}\n\treturn 0, fmt.Errorf(\"no bucket for value: %d\", value)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/benchmark/stats/stats.go",
    "content": "package stats\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"time\"\n)\n\n// Stats is a simple helper for gathering additional statistics like histogram\n// during benchmarks. This is not thread safe.\ntype Stats struct {\n\tnumBuckets int\n\tunit       time.Duration\n\tmin, max   int64\n\thistogram  *Histogram\n\n\tdurations durationSlice\n\tdirty     bool\n}\n\ntype durationSlice []time.Duration\n\n// NewStats creates a new Stats instance. If numBuckets is not positive,\n// the default value (16) will be used.\nfunc NewStats(numBuckets int) *Stats {\n\tif numBuckets <= 0 {\n\t\tnumBuckets = 16\n\t}\n\treturn &Stats{\n\t\t// Use one more bucket for the last unbounded bucket.\n\t\tnumBuckets: numBuckets + 1,\n\t\tdurations:  make(durationSlice, 0, 100000),\n\t}\n}\n\n// Add adds an elapsed time per operation to the stats.\nfunc (stats *Stats) Add(d time.Duration) {\n\tstats.durations = append(stats.durations, d)\n\tstats.dirty = true\n}\n\n// Clear resets the stats, removing all values.\nfunc (stats *Stats) Clear() {\n\tstats.durations = stats.durations[:0]\n\tstats.histogram = nil\n\tstats.dirty = false\n}\n\n// maybeUpdate updates internal stat data if there was any newly added\n// stats since this was updated.\nfunc (stats *Stats) maybeUpdate() {\n\tif !stats.dirty {\n\t\treturn\n\t}\n\n\tstats.min = math.MaxInt64\n\tstats.max = 0\n\tfor _, d := range stats.durations {\n\t\tif stats.min > int64(d) {\n\t\t\tstats.min = int64(d)\n\t\t}\n\t\tif stats.max < int64(d) {\n\t\t\tstats.max = int64(d)\n\t\t}\n\t}\n\n\t// Use the largest unit that can represent the minimum time duration.\n\tstats.unit = time.Nanosecond\n\tfor _, u := range []time.Duration{time.Microsecond, time.Millisecond, time.Second} {\n\t\tif stats.min <= int64(u) {\n\t\t\tbreak\n\t\t}\n\t\tstats.unit = u\n\t}\n\n\t// Adjust the min/max according to the new unit.\n\tstats.min /= int64(stats.unit)\n\tstats.max /= int64(stats.unit)\n\tnumBuckets := stats.numBuckets\n\tif n 
:= int(stats.max - stats.min + 1); n < numBuckets {\n\t\tnumBuckets = n\n\t}\n\tstats.histogram = NewHistogram(HistogramOptions{\n\t\tNumBuckets: numBuckets,\n\t\t// max(i.e., Nth lower bound) = min + (1 + growthFactor)^(numBuckets-2).\n\t\tGrowthFactor:       math.Pow(float64(stats.max-stats.min), 1/float64(stats.numBuckets-2)) - 1,\n\t\tSmallestBucketSize: 1.0,\n\t\tMinValue:           stats.min})\n\n\tfor _, d := range stats.durations {\n\t\tstats.histogram.Add(int64(d / stats.unit))\n\t}\n\n\tstats.dirty = false\n}\n\n// Print writes textual output of the Stats.\nfunc (stats *Stats) Print(w io.Writer) {\n\tstats.maybeUpdate()\n\n\tif stats.histogram == nil {\n\t\tfmt.Fprint(w, \"Histogram (empty)\\n\")\n\t} else {\n\t\tfmt.Fprintf(w, \"Histogram (unit: %s)\\n\", fmt.Sprintf(\"%v\", stats.unit)[1:])\n\t\tstats.histogram.Value().Print(w)\n\t}\n}\n\n// String returns the textual output of the Stats as string.\nfunc (stats *Stats) String() string {\n\tvar b bytes.Buffer\n\tstats.Print(&b)\n\treturn b.String()\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/benchmark/stats/timeseries.go",
    "content": "package stats\n\nimport (\n\t\"math\"\n\t\"time\"\n)\n\n// timeseries holds the history of a changing value over a predefined period of\n// time.\ntype timeseries struct {\n\tsize       int           // The number of time slots. Equivalent to len(slots).\n\tresolution time.Duration // The time resolution of each slot.\n\tstepCount  int64         // The number of intervals seen since creation.\n\thead       int           // The position of the current time in slots.\n\ttime       time.Time     // The time at the beginning of the current time slot.\n\tslots      []int64       // A circular buffer of time slots.\n}\n\n// newTimeSeries returns a newly allocated timeseries that covers the requested\n// period with the given resolution.\nfunc newTimeSeries(initialTime time.Time, period, resolution time.Duration) *timeseries {\n\tsize := int(period.Nanoseconds()/resolution.Nanoseconds()) + 1\n\treturn &timeseries{\n\t\tsize:       size,\n\t\tresolution: resolution,\n\t\tstepCount:  1,\n\t\ttime:       initialTime,\n\t\tslots:      make([]int64, size),\n\t}\n}\n\n// advanceTimeWithFill moves the timeseries forward to time t and fills in any\n// slots that get skipped in the process with the given value. 
Values older than\n// the timeseries period are lost.\nfunc (ts *timeseries) advanceTimeWithFill(t time.Time, value int64) {\n\tadvanceTo := t.Truncate(ts.resolution)\n\tif !advanceTo.After(ts.time) {\n\t\t// This is shortcut for the most common case of a busy counter\n\t\t// where updates come in many times per ts.resolution.\n\t\tts.time = advanceTo\n\t\treturn\n\t}\n\tsteps := int(advanceTo.Sub(ts.time).Nanoseconds() / ts.resolution.Nanoseconds())\n\tts.stepCount += int64(steps)\n\tif steps > ts.size {\n\t\tsteps = ts.size\n\t}\n\tfor steps > 0 {\n\t\tts.head = (ts.head + 1) % ts.size\n\t\tts.slots[ts.head] = value\n\t\tsteps--\n\t}\n\tts.time = advanceTo\n}\n\n// advanceTime moves the timeseries forward to time t and fills in any slots\n// that get skipped in the process with the head value. Values older than the\n// timeseries period are lost.\nfunc (ts *timeseries) advanceTime(t time.Time) {\n\tts.advanceTimeWithFill(t, ts.slots[ts.head])\n}\n\n// set sets the current value of the timeseries.\nfunc (ts *timeseries) set(value int64) {\n\tts.slots[ts.head] = value\n}\n\n// incr increments the current value of the timeseries.\nfunc (ts *timeseries) incr(delta int64) {\n\tts.slots[ts.head] += delta\n}\n\n// headValue returns the latest value from the timeseries.\nfunc (ts *timeseries) headValue() int64 {\n\treturn ts.slots[ts.head]\n}\n\n// headTime returns the time of the latest value from the timeseries.\nfunc (ts *timeseries) headTime() time.Time {\n\treturn ts.time\n}\n\n// tailValue returns the oldest value from the timeseries.\nfunc (ts *timeseries) tailValue() int64 {\n\tif ts.stepCount < int64(ts.size) {\n\t\treturn 0\n\t}\n\treturn ts.slots[(ts.head+1)%ts.size]\n}\n\n// tailTime returns the time of the oldest value from the timeseries.\nfunc (ts *timeseries) tailTime() time.Time {\n\tsize := int64(ts.size)\n\tif ts.stepCount < size {\n\t\tsize = ts.stepCount\n\t}\n\treturn ts.time.Add(-time.Duration(size-1) * ts.resolution)\n}\n\n// delta returns the 
difference between the newest and oldest values from the\n// timeseries.\nfunc (ts *timeseries) delta() int64 {\n\treturn ts.headValue() - ts.tailValue()\n}\n\n// rate returns the rate of change between the oldest and newest values from\n// the timeseries in units per second.\nfunc (ts *timeseries) rate() float64 {\n\tdeltaTime := ts.headTime().Sub(ts.tailTime()).Seconds()\n\tif deltaTime == 0 {\n\t\treturn 0\n\t}\n\treturn float64(ts.delta()) / deltaTime\n}\n\n// min returns the smallest value from the timeseries.\nfunc (ts *timeseries) min() int64 {\n\tto := ts.size\n\tif ts.stepCount < int64(ts.size) {\n\t\tto = ts.head + 1\n\t}\n\ttail := (ts.head + 1) % ts.size\n\tmin := int64(math.MaxInt64)\n\tfor b := 0; b < to; b++ {\n\t\tif b != tail && ts.slots[b] < min {\n\t\t\tmin = ts.slots[b]\n\t\t}\n\t}\n\treturn min\n}\n\n// max returns the largest value from the timeseries.\nfunc (ts *timeseries) max() int64 {\n\tto := ts.size\n\tif ts.stepCount < int64(ts.size) {\n\t\tto = ts.head + 1\n\t}\n\ttail := (ts.head + 1) % ts.size\n\tmax := int64(math.MinInt64)\n\tfor b := 0; b < to; b++ {\n\t\tif b != tail && ts.slots[b] > max {\n\t\t\tmax = ts.slots[b]\n\t\t}\n\t}\n\treturn max\n}\n\n// reset resets the timeseries to an empty state.\nfunc (ts *timeseries) reset(t time.Time) {\n\tts.head = 0\n\tts.time = t\n\tts.stepCount = 1\n\tts.slots = make([]int64, ts.size)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/benchmark/stats/tracker.go",
    "content": "package stats\n\nimport (\n\t\"math\"\n\t\"sync\"\n\t\"time\"\n)\n\n// Tracker is a min/max value tracker that keeps track of its min/max values\n// over a given period of time, and with a given resolution. The initial min\n// and max values are math.MaxInt64 and math.MinInt64 respectively.\ntype Tracker struct {\n\tmu           sync.RWMutex\n\tmin, max     int64 // All time min/max.\n\tminTS, maxTS [3]*timeseries\n\tlastUpdate   time.Time\n}\n\n// newTracker returns a new Tracker.\nfunc newTracker() *Tracker {\n\tnow := TimeNow()\n\tt := &Tracker{}\n\tt.minTS[hour] = newTimeSeries(now, time.Hour, time.Minute)\n\tt.minTS[tenminutes] = newTimeSeries(now, 10*time.Minute, 10*time.Second)\n\tt.minTS[minute] = newTimeSeries(now, time.Minute, time.Second)\n\tt.maxTS[hour] = newTimeSeries(now, time.Hour, time.Minute)\n\tt.maxTS[tenminutes] = newTimeSeries(now, 10*time.Minute, 10*time.Second)\n\tt.maxTS[minute] = newTimeSeries(now, time.Minute, time.Second)\n\tt.init()\n\treturn t\n}\n\nfunc (t *Tracker) init() {\n\tt.min = math.MaxInt64\n\tt.max = math.MinInt64\n\tfor _, ts := range t.minTS {\n\t\tts.set(math.MaxInt64)\n\t}\n\tfor _, ts := range t.maxTS {\n\t\tts.set(math.MinInt64)\n\t}\n}\n\nfunc (t *Tracker) advance() time.Time {\n\tnow := TimeNow()\n\tfor _, ts := range t.minTS {\n\t\tts.advanceTimeWithFill(now, math.MaxInt64)\n\t}\n\tfor _, ts := range t.maxTS {\n\t\tts.advanceTimeWithFill(now, math.MinInt64)\n\t}\n\treturn now\n}\n\n// LastUpdate returns the last update time of the range.\nfunc (t *Tracker) LastUpdate() time.Time {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\treturn t.lastUpdate\n}\n\n// Push adds a new value if it is a new minimum or maximum.\nfunc (t *Tracker) Push(value int64) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tt.lastUpdate = t.advance()\n\tif t.min > value {\n\t\tt.min = value\n\t}\n\tif t.max < value {\n\t\tt.max = value\n\t}\n\tfor _, ts := range t.minTS {\n\t\tif ts.headValue() > value 
{\n\t\t\tts.set(value)\n\t\t}\n\t}\n\tfor _, ts := range t.maxTS {\n\t\tif ts.headValue() < value {\n\t\t\tts.set(value)\n\t\t}\n\t}\n}\n\n// Min returns the minimum value of the tracker\nfunc (t *Tracker) Min() int64 {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\treturn t.min\n}\n\n// Max returns the maximum value of the tracker.\nfunc (t *Tracker) Max() int64 {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\treturn t.max\n}\n\n// Min1h returns the minimum value for the last hour.\nfunc (t *Tracker) Min1h() int64 {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tt.advance()\n\treturn t.minTS[hour].min()\n}\n\n// Max1h returns the maximum value for the last hour.\nfunc (t *Tracker) Max1h() int64 {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tt.advance()\n\treturn t.maxTS[hour].max()\n}\n\n// Min10m returns the minimum value for the last 10 minutes.\nfunc (t *Tracker) Min10m() int64 {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tt.advance()\n\treturn t.minTS[tenminutes].min()\n}\n\n// Max10m returns the maximum value for the last 10 minutes.\nfunc (t *Tracker) Max10m() int64 {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tt.advance()\n\treturn t.maxTS[tenminutes].max()\n}\n\n// Min1m returns the minimum value for the last 1 minute.\nfunc (t *Tracker) Min1m() int64 {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tt.advance()\n\treturn t.minTS[minute].min()\n}\n\n// Max1m returns the maximum value for the last 1 minute.\nfunc (t *Tracker) Max1m() int64 {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tt.advance()\n\treturn t.maxTS[minute].max()\n}\n\n// Reset resets the range to an empty state.\nfunc (t *Tracker) Reset() {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tnow := TimeNow()\n\tfor _, ts := range t.minTS {\n\t\tts.reset(now)\n\t}\n\tfor _, ts := range t.maxTS {\n\t\tts.reset(now)\n\t}\n\tt.init()\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/benchmark/stats/util.go",
    "content": "package stats\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n)\n\nvar (\n\tcurB         *testing.B\n\tcurBenchName string\n\tcurStats     map[string]*Stats\n\n\torgStdout  *os.File\n\tnextOutPos int\n\n\tinjectCond *sync.Cond\n\tinjectDone chan struct{}\n)\n\n// AddStats adds a new unnamed Stats instance to the current benchmark. You need\n// to run benchmarks by calling RunTestMain() to inject the stats to the\n// benchmark results. If numBuckets is not positive, the default value (16) will\n// be used. Please note that this calls b.ResetTimer() since it may be blocked\n// until the previous benchmark stats is printed out. So AddStats() should\n// typically be called at the very beginning of each benchmark function.\nfunc AddStats(b *testing.B, numBuckets int) *Stats {\n\treturn AddStatsWithName(b, \"\", numBuckets)\n}\n\n// AddStatsWithName adds a new named Stats instance to the current benchmark.\n// With this, you can add multiple stats in a single benchmark. You need\n// to run benchmarks by calling RunTestMain() to inject the stats to the\n// benchmark results. If numBuckets is not positive, the default value (16) will\n// be used. Please note that this calls b.ResetTimer() since it may be blocked\n// until the previous benchmark stats is printed out. 
So AddStatsWithName()\n// should typically be called at the very beginning of each benchmark function.\nfunc AddStatsWithName(b *testing.B, name string, numBuckets int) *Stats {\n\tvar benchName string\n\tfor i := 1; ; i++ {\n\t\tpc, _, _, ok := runtime.Caller(i)\n\t\tif !ok {\n\t\t\tpanic(\"benchmark function not found\")\n\t\t}\n\t\tp := strings.Split(runtime.FuncForPC(pc).Name(), \".\")\n\t\tbenchName = p[len(p)-1]\n\t\tif strings.HasPrefix(benchName, \"Benchmark\") {\n\t\t\tbreak\n\t\t}\n\t}\n\tprocs := runtime.GOMAXPROCS(-1)\n\tif procs != 1 {\n\t\tbenchName = fmt.Sprintf(\"%s-%d\", benchName, procs)\n\t}\n\n\tstats := NewStats(numBuckets)\n\n\tif injectCond != nil {\n\t\t// We need to wait until the previous benchmark stats is printed out.\n\t\tinjectCond.L.Lock()\n\t\tfor curB != nil && curBenchName != benchName {\n\t\t\tinjectCond.Wait()\n\t\t}\n\n\t\tcurB = b\n\t\tcurBenchName = benchName\n\t\tcurStats[name] = stats\n\n\t\tinjectCond.L.Unlock()\n\t}\n\n\tb.ResetTimer()\n\treturn stats\n}\n\n// RunTestMain runs the tests with enabling injection of benchmark stats. 
It\n// returns an exit code to pass to os.Exit.\nfunc RunTestMain(m *testing.M) int {\n\tstartStatsInjector()\n\tdefer stopStatsInjector()\n\treturn m.Run()\n}\n\n// startStatsInjector starts stats injection to benchmark results.\nfunc startStatsInjector() {\n\torgStdout = os.Stdout\n\tr, w, _ := os.Pipe()\n\tos.Stdout = w\n\tnextOutPos = 0\n\n\tresetCurBenchStats()\n\n\tinjectCond = sync.NewCond(&sync.Mutex{})\n\tinjectDone = make(chan struct{})\n\tgo func() {\n\t\tdefer close(injectDone)\n\n\t\tscanner := bufio.NewScanner(r)\n\t\tscanner.Split(splitLines)\n\t\tfor scanner.Scan() {\n\t\t\tinjectStatsIfFinished(scanner.Text())\n\t\t}\n\t\tif err := scanner.Err(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n}\n\n// stopStatsInjector stops stats injection and restores os.Stdout.\nfunc stopStatsInjector() {\n\tos.Stdout.Close()\n\t<-injectDone\n\tinjectCond = nil\n\tos.Stdout = orgStdout\n}\n\n// splitLines is a split function for a bufio.Scanner that returns each line\n// of text, teeing texts to the original stdout even before each line ends.\nfunc splitLines(data []byte, eof bool) (advance int, token []byte, err error) {\n\tif eof && len(data) == 0 {\n\t\treturn 0, nil, nil\n\t}\n\n\tif i := bytes.IndexByte(data, '\\n'); i >= 0 {\n\t\torgStdout.Write(data[nextOutPos : i+1])\n\t\tnextOutPos = 0\n\t\treturn i + 1, data[0:i], nil\n\t}\n\n\torgStdout.Write(data[nextOutPos:])\n\tnextOutPos = len(data)\n\n\tif eof {\n\t\t// This is a final, non-terminated line. 
Return it.\n\t\treturn len(data), data, nil\n\t}\n\n\treturn 0, nil, nil\n}\n\n// injectStatsIfFinished prints out the stats if the current benchmark finishes.\nfunc injectStatsIfFinished(line string) {\n\tinjectCond.L.Lock()\n\tdefer injectCond.L.Unlock()\n\n\t// We assume that the benchmark results start with the benchmark name.\n\tif curB == nil || !strings.HasPrefix(line, curBenchName) {\n\t\treturn\n\t}\n\n\tif !curB.Failed() {\n\t\t// Output all stats in alphabetical order.\n\t\tnames := make([]string, 0, len(curStats))\n\t\tfor name := range curStats {\n\t\t\tnames = append(names, name)\n\t\t}\n\t\tsort.Strings(names)\n\t\tfor _, name := range names {\n\t\t\tstats := curStats[name]\n\t\t\t// The output of stats starts with a header like \"Histogram (unit: ms)\"\n\t\t\t// followed by statistical properties and the buckets. Add the stats name\n\t\t\t// if it is a named stats and indent them as Go testing outputs.\n\t\t\tlines := strings.Split(stats.String(), \"\\n\")\n\t\t\tif n := len(lines); n > 0 {\n\t\t\t\tif name != \"\" {\n\t\t\t\t\tname = \": \" + name\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(orgStdout, \"--- %s%s\\n\", lines[0], name)\n\t\t\t\tfor _, line := range lines[1 : n-1] {\n\t\t\t\t\tfmt.Fprintf(orgStdout, \"\\t%s\\n\", line)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tresetCurBenchStats()\n\tinjectCond.Signal()\n}\n\n// resetCurBenchStats resets the current benchmark stats.\nfunc resetCurBenchStats() {\n\tcurB = nil\n\tcurBenchName = \"\"\n\tcurStats = make(map[string]*Stats)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/call.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\npackage grpc\n\nimport (\n\t\"io\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/metadata\"\n\t\"google.golang.org/grpc/transport\"\n)\n\n// recvResponse receives and parses an RPC response.\n// On error, it returns the error and indicates whether the call should be retried.\n//\n// TODO(zhaoq): Check whether the received message sequence is valid.\nfunc recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {\n\t// Try to acquire header metadata from the server if there is any.\n\tvar err error\n\tc.headerMD, err = stream.Header()\n\tif err != nil {\n\t\treturn err\n\t}\n\tp := &parser{s: stream}\n\tfor {\n\t\tif err = recv(p, codec, reply); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\tc.trailerMD = stream.Trailer()\n\treturn nil\n}\n\n// sendRequest writes out various information of an RPC such as Context and Message.\nfunc sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {\n\tstream, err := t.NewStream(ctx, callHdr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tif _, ok := err.(transport.ConnectionError); !ok {\n\t\t\t\tt.CloseStream(stream, err)\n\t\t\t}\n\t\t}\n\t}()\n\t// 
TODO(zhaoq): Support compression.\n\toutBuf, err := encode(codec, args, compressionNone)\n\tif err != nil {\n\t\treturn nil, transport.StreamErrorf(codes.Internal, \"grpc: %v\", err)\n\t}\n\terr = t.Write(stream, outBuf, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Sent successfully.\n\treturn stream, nil\n}\n\n// callInfo contains all related configuration and information about an RPC.\ntype callInfo struct {\n\tfailFast  bool\n\theaderMD  metadata.MD\n\ttrailerMD metadata.MD\n}\n\n// Invoke is called by the generated code. It sends the RPC request on the\n// wire and returns after response is received.\nfunc Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {\n\tvar c callInfo\n\tfor _, o := range opts {\n\t\tif err := o.before(&c); err != nil {\n\t\t\treturn toRPCErr(err)\n\t\t}\n\t}\n\tdefer func() {\n\t\tfor _, o := range opts {\n\t\t\to.after(&c)\n\t\t}\n\t}()\n\tcallHdr := &transport.CallHdr{\n\t\tHost:   cc.authority,\n\t\tMethod: method,\n\t}\n\ttopts := &transport.Options{\n\t\tLast:  true,\n\t\tDelay: false,\n\t}\n\tvar (\n\t\tts      int   // track the transport sequence number\n\t\tlastErr error // record the error that happened\n\t)\n\tfor {\n\t\tvar (\n\t\t\terr    error\n\t\t\tt      transport.ClientTransport\n\t\t\tstream *transport.Stream\n\t\t)\n\t\t// TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs.\n\t\tif lastErr != nil && c.failFast {\n\t\t\treturn toRPCErr(lastErr)\n\t\t}\n\t\tt, ts, err = cc.wait(ctx, ts)\n\t\tif err != nil {\n\t\t\tif lastErr != nil {\n\t\t\t\t// This was a retry; return the error from the last attempt.\n\t\t\t\treturn toRPCErr(lastErr)\n\t\t\t}\n\t\t\treturn toRPCErr(err)\n\t\t}\n\t\tstream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(transport.ConnectionError); ok {\n\t\t\t\tlastErr = err\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif lastErr != nil {\n\t\t\t\treturn 
toRPCErr(lastErr)\n\t\t\t}\n\t\t\treturn toRPCErr(err)\n\t\t}\n\t\t// Receive the response\n\t\tlastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply)\n\t\tif _, ok := lastErr.(transport.ConnectionError); ok {\n\t\t\tcontinue\n\t\t}\n\t\tt.CloseStream(stream, lastErr)\n\t\tif lastErr != nil {\n\t\t\treturn toRPCErr(lastErr)\n\t\t}\n\t\treturn Errorf(stream.StatusCode(), stream.StatusDesc())\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/clientconn.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\npackage grpc\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/grpc/grpclog\"\n\t\"google.golang.org/grpc/transport\"\n)\n\nvar (\n\t// ErrUnspecTarget indicates that the target address is unspecified.\n\tErrUnspecTarget = errors.New(\"grpc: target is unspecified\")\n\t// ErrClientConnClosing indicates that the operation is illegal because\n\t// the session is closing.\n\tErrClientConnClosing = errors.New(\"grpc: the client connection is closing\")\n\t// ErrClientConnTimeout indicates that the connection could not be\n\t// established or re-established within the specified timeout.\n\tErrClientConnTimeout = errors.New(\"grpc: timed out trying to connect\")\n)\n\n// dialOptions configure a Dial call. 
dialOptions are set by the DialOption\n// values passed to Dial.\ntype dialOptions struct {\n\tcodec Codec\n\tcopts transport.ConnectOptions\n}\n\n// DialOption configures how we set up the connection.\ntype DialOption func(*dialOptions)\n\n// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.\nfunc WithCodec(c Codec) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.codec = c\n\t}\n}\n\n// WithTransportCredentials returns a DialOption which configures a\n// connection level security credentials (e.g., TLS/SSL).\nfunc WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.copts.AuthOptions = append(o.copts.AuthOptions, creds)\n\t}\n}\n\n// WithPerRPCCredentials returns a DialOption which sets\n// credentials which will place auth state on each outbound RPC.\nfunc WithPerRPCCredentials(creds credentials.Credentials) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.copts.AuthOptions = append(o.copts.AuthOptions, creds)\n\t}\n}\n\n// WithTimeout returns a DialOption that configures a timeout for dialing a client connection.\nfunc WithTimeout(d time.Duration) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.copts.Timeout = d\n\t}\n}\n\n// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.\nfunc WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.copts.Dialer = f\n\t}\n}\n\n// Dial creates a client connection the given target.\n// TODO(zhaoq): Have an option to make Dial return immediately without waiting\n// for connection to complete.\nfunc Dial(target string, opts ...DialOption) (*ClientConn, error) {\n\tif target == \"\" {\n\t\treturn nil, ErrUnspecTarget\n\t}\n\tcc := &ClientConn{\n\t\ttarget: target,\n\t}\n\tfor _, opt := range opts {\n\t\topt(&cc.dopts)\n\t}\n\tcolonPos := strings.LastIndex(target, \":\")\n\tif colonPos == -1 
{\n\t\tcolonPos = len(target)\n\t}\n\tcc.authority = target[:colonPos]\n\tif cc.dopts.codec == nil {\n\t\t// Set the default codec.\n\t\tcc.dopts.codec = protoCodec{}\n\t}\n\tif err := cc.resetTransport(false); err != nil {\n\t\treturn nil, err\n\t}\n\tcc.shutdownChan = make(chan struct{})\n\t// Start to monitor the error status of transport.\n\tgo cc.transportMonitor()\n\treturn cc, nil\n}\n\n// ClientConn represents a client connection to an RPC service.\ntype ClientConn struct {\n\ttarget       string\n\tauthority    string\n\tdopts        dialOptions\n\tshutdownChan chan struct{}\n\n\tmu sync.Mutex\n\t// ready is closed and becomes nil when a new transport is up or failed\n\t// due to timeout.\n\tready chan struct{}\n\t// Indicates the ClientConn is under destruction.\n\tclosing bool\n\t// Every time a new transport is created, this is incremented by 1. Used\n\t// to avoid trying to recreate a transport while the new one is already\n\t// under construction.\n\ttransportSeq int\n\ttransport    transport.ClientTransport\n}\n\nfunc (cc *ClientConn) resetTransport(closeTransport bool) error {\n\tvar retries int\n\tstart := time.Now()\n\tfor {\n\t\tcc.mu.Lock()\n\t\tt := cc.transport\n\t\tts := cc.transportSeq\n\t\t// Avoid wait() picking up a dying transport unnecessarily.\n\t\tcc.transportSeq = 0\n\t\tif cc.closing {\n\t\t\tcc.mu.Unlock()\n\t\t\treturn ErrClientConnClosing\n\t\t}\n\t\tcc.mu.Unlock()\n\t\tif closeTransport {\n\t\t\tt.Close()\n\t\t}\n\t\t// Adjust timeout for the current try.\n\t\tcopts := cc.dopts.copts\n\t\tif copts.Timeout < 0 {\n\t\t\tcc.Close()\n\t\t\treturn ErrClientConnTimeout\n\t\t}\n\t\tif copts.Timeout > 0 {\n\t\t\tcopts.Timeout -= time.Since(start)\n\t\t\tif copts.Timeout <= 0 {\n\t\t\t\tcc.Close()\n\t\t\t\treturn ErrClientConnTimeout\n\t\t\t}\n\t\t}\n\t\tnewTransport, err := transport.NewClientTransport(cc.target, &copts)\n\t\tif err != nil {\n\t\t\tsleepTime := backoff(retries)\n\t\t\t// Fail early before falling into sleep.\n\t\t\tif 
cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) {\n\t\t\t\tcc.Close()\n\t\t\t\treturn ErrClientConnTimeout\n\t\t\t}\n\t\t\tcloseTransport = false\n\t\t\ttime.Sleep(sleepTime)\n\t\t\tretries++\n\t\t\tgrpclog.Printf(\"grpc: ClientConn.resetTransport failed to create client transport: %v; Reconnecting to %q\", err, cc.target)\n\t\t\tcontinue\n\t\t}\n\t\tcc.mu.Lock()\n\t\tif cc.closing {\n\t\t\t// cc.Close() has been invoked.\n\t\t\tcc.mu.Unlock()\n\t\t\tnewTransport.Close()\n\t\t\treturn ErrClientConnClosing\n\t\t}\n\t\tcc.transport = newTransport\n\t\tcc.transportSeq = ts + 1\n\t\tif cc.ready != nil {\n\t\t\tclose(cc.ready)\n\t\t\tcc.ready = nil\n\t\t}\n\t\tcc.mu.Unlock()\n\t\treturn nil\n\t}\n}\n\n// Run in a goroutine to track the error in transport and create the\n// new transport if an error happens. It returns when the channel is closing.\nfunc (cc *ClientConn) transportMonitor() {\n\tfor {\n\t\tselect {\n\t\t// shutdownChan is needed to detect the channel teardown when\n\t\t// the ClientConn is idle (i.e., no RPC in flight).\n\t\tcase <-cc.shutdownChan:\n\t\t\treturn\n\t\tcase <-cc.transport.Error():\n\t\t\tif err := cc.resetTransport(true); err != nil {\n\t\t\t\t// The channel is closing.\n\t\t\t\tgrpclog.Printf(\"grpc: ClientConn.transportMonitor exits due to: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\n// When wait returns, either the new transport is up or ClientConn is\n// closing. Used to avoid working on a dying transport. It updates and\n// returns the transport and its version when there is no error.\nfunc (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTransport, int, error) {\n\tfor {\n\t\tcc.mu.Lock()\n\t\tswitch {\n\t\tcase cc.closing:\n\t\t\tcc.mu.Unlock()\n\t\t\treturn nil, 0, ErrClientConnClosing\n\t\tcase ts < cc.transportSeq:\n\t\t\t// Worked on a dying transport. 
Try the new one immediately.\n\t\t\tdefer cc.mu.Unlock()\n\t\t\treturn cc.transport, cc.transportSeq, nil\n\t\tdefault:\n\t\t\tready := cc.ready\n\t\t\tif ready == nil {\n\t\t\t\tready = make(chan struct{})\n\t\t\t\tcc.ready = ready\n\t\t\t}\n\t\t\tcc.mu.Unlock()\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn nil, 0, transport.ContextErr(ctx.Err())\n\t\t\t// Wait until the new transport is ready or failed.\n\t\t\tcase <-ready:\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Close starts to tear down the ClientConn. Returns ErrClientConnClosing if\n// it has been closed (mostly due to dial time-out).\n// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in\n// some edge cases (e.g., the caller opens and closes many ClientConn's in a\n// tight loop.\nfunc (cc *ClientConn) Close() error {\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\tif cc.closing {\n\t\treturn ErrClientConnClosing\n\t}\n\tcc.closing = true\n\tif cc.ready != nil {\n\t\tclose(cc.ready)\n\t\tcc.ready = nil\n\t}\n\tif cc.transport != nil {\n\t\tcc.transport.Close()\n\t}\n\tif cc.shutdownChan != nil {\n\t\tclose(cc.shutdownChan)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/codegen.sh",
    "content": "#!/bin/bash\n\n# This script serves as an example to demonstrate how to generate the gRPC-Go\n# interface and the related messages from .proto file.\n#\n# It assumes the installation of i) Google proto buffer compiler at\n# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen\n# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have\n# not, please install them first.\n#\n# We recommend running this script at $GOPATH or $GOPATH/src.\n#\n# If this is not what you need, feel free to make your own scripts. Again, this\n# script is for demonstration purpose.\n#\nproto=$1\nprotoc --go_out=plugins=grpc:. $proto\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/codes/code_string.go",
    "content": "// generated by stringer -type=Code; DO NOT EDIT\n\npackage codes\n\nimport \"fmt\"\n\nconst _Code_name = \"OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated\"\n\nvar _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}\n\nfunc (i Code) String() string {\n\tif i+1 >= Code(len(_Code_index)) {\n\t\treturn fmt.Sprintf(\"Code(%d)\", i)\n\t}\n\treturn _Code_name[_Code_index[i]:_Code_index[i+1]]\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/codes/codes.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n// Package codes defines the canonical error codes used by gRPC. 
It is\n// consistent across various languages.\npackage codes\n\n// A Code is an unsigned 32-bit error code as defined in the gRPC spec.\ntype Code uint32\n\n//go:generate stringer -type=Code\n\nconst (\n\t// OK is returned on success.\n\tOK Code = 0\n\n\t// Canceled indicates the operation was cancelled (typically by the caller).\n\tCanceled Code = 1\n\n\t// Unknown error.  An example of where this error may be returned is\n\t// if a Status value received from another address space belongs to\n\t// an error-space that is not known in this address space.  Also\n\t// errors raised by APIs that do not return enough error information\n\t// may be converted to this error.\n\tUnknown Code = 2\n\n\t// InvalidArgument indicates client specified an invalid argument.\n\t// Note that this differs from FailedPrecondition. It indicates arguments\n\t// that are problematic regardless of the state of the system\n\t// (e.g., a malformed file name).\n\tInvalidArgument Code = 3\n\n\t// DeadlineExceeded means operation expired before completion.\n\t// For operations that change the state of the system, this error may be\n\t// returned even if the operation has completed successfully. For\n\t// example, a successful response from a server could have been delayed\n\t// long enough for the deadline to expire.\n\tDeadlineExceeded Code = 4\n\n\t// NotFound means some requested entity (e.g., file or directory) was\n\t// not found.\n\tNotFound Code = 5\n\n\t// AlreadyExists means an attempt to create an entity failed because one\n\t// already exists.\n\tAlreadyExists Code = 6\n\n\t// PermissionDenied indicates the caller does not have permission to\n\t// execute the specified operation. It must not be used for rejections\n\t// caused by exhausting some resource (use ResourceExhausted\n\t// instead for those errors).  
It must not be\n\t// used if the caller cannot be identified (use Unauthenticated\n\t// instead for those errors).\n\tPermissionDenied Code = 7\n\n\t// Unauthenticated indicates the request does not have valid\n\t// authentication credentials for the operation.\n\tUnauthenticated Code = 16\n\n\t// ResourceExhausted indicates some resource has been exhausted, perhaps\n\t// a per-user quota, or perhaps the entire file system is out of space.\n\tResourceExhausted Code = 8\n\n\t// FailedPrecondition indicates operation was rejected because the\n\t// system is not in a state required for the operation's execution.\n\t// For example, directory to be deleted may be non-empty, an rmdir\n\t// operation is applied to a non-directory, etc.\n\t//\n\t// A litmus test that may help a service implementor in deciding\n\t// between FailedPrecondition, Aborted, and Unavailable:\n\t//  (a) Use Unavailable if the client can retry just the failing call.\n\t//  (b) Use Aborted if the client should retry at a higher-level\n\t//      (e.g., restarting a read-modify-write sequence).\n\t//  (c) Use FailedPrecondition if the client should not retry until\n\t//      the system state has been explicitly fixed.  E.g., if an \"rmdir\"\n\t//      fails because the directory is non-empty, FailedPrecondition\n\t//      should be returned since the client should not retry unless\n\t//      they have first fixed up the directory by deleting files from it.\n\t//  (d) Use FailedPrecondition if the client performs conditional\n\t//      REST Get/Update/Delete on a resource and the resource on the\n\t//      server does not match the condition. 
E.g., conflicting\n\t//      read-modify-write on the same resource.\n\tFailedPrecondition Code = 9\n\n\t// Aborted indicates the operation was aborted, typically due to a\n\t// concurrency issue like sequencer check failures, transaction aborts,\n\t// etc.\n\t//\n\t// See litmus test above for deciding between FailedPrecondition,\n\t// Aborted, and Unavailable.\n\tAborted Code = 10\n\n\t// OutOfRange means operation was attempted past the valid range.\n\t// E.g., seeking or reading past end of file.\n\t//\n\t// Unlike InvalidArgument, this error indicates a problem that may\n\t// be fixed if the system state changes. For example, a 32-bit file\n\t// system will generate InvalidArgument if asked to read at an\n\t// offset that is not in the range [0,2^32-1], but it will generate\n\t// OutOfRange if asked to read from an offset past the current\n\t// file size.\n\t//\n\t// There is a fair bit of overlap between FailedPrecondition and\n\t// OutOfRange.  We recommend using OutOfRange (the more specific\n\t// error) when it applies so that callers who are iterating through\n\t// a space can easily look for an OutOfRange error to detect when\n\t// they are done.\n\tOutOfRange Code = 11\n\n\t// Unimplemented indicates operation is not implemented or not\n\t// supported/enabled in this service.\n\tUnimplemented Code = 12\n\n\t// Internal errors.  Means some invariants expected by underlying\n\t// system has been broken.  If you see one of these errors,\n\t// something is very broken.\n\tInternal Code = 13\n\n\t// Unavailable indicates the service is currently unavailable.\n\t// This is a most likely a transient condition and may be corrected\n\t// by retrying with a backoff.\n\t//\n\t// See litmus test above for deciding between FailedPrecondition,\n\t// Aborted, and Unavailable.\n\tUnavailable Code = 14\n\n\t// DataLoss indicates unrecoverable data loss or corruption.\n\tDataLoss Code = 15\n)\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/credentials/credentials.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n// Package credentials implements various credentials supported by gRPC library,\n// which encapsulate all the state needed by a client to authenticate with a\n// server and make various assertions, e.g., about the client's identity, role,\n// or whether it is authorized to make a particular call.\npackage credentials\n\nimport (\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/google\"\n\t\"golang.org/x/oauth2/jwt\"\n)\n\nvar (\n\t// alpnProtoStr are the specified application level protocols for gRPC.\n\talpnProtoStr = []string{\"h2\", \"h2-14\", \"h2-15\", \"h2-16\"}\n)\n\n// Credentials defines the common interface all supported credentials must\n// implement.\ntype Credentials interface {\n\t// GetRequestMetadata gets the current request metadata, refreshing\n\t// tokens if required. This should be called by the transport layer on\n\t// each request, and the data should be populated in headers or other\n\t// context. 
When supported by the underlying implementation, ctx can\n\t// be used for timeout and cancellation.\n\t// TODO(zhaoq): Define the set of the qualified keys instead of leaving\n\t// it as an arbitrary string.\n\tGetRequestMetadata(ctx context.Context) (map[string]string, error)\n}\n\n// ProtocolInfo provides information regarding the gRPC wire protocol version,\n// security protocol, security protocol version in use, etc.\ntype ProtocolInfo struct {\n\t// ProtocolVersion is the gRPC wire protocol version.\n\tProtocolVersion string\n\t// SecurityProtocol is the security protocol in use.\n\tSecurityProtocol string\n\t// SecurityVersion is the security protocol version.\n\tSecurityVersion string\n}\n\n// TransportAuthenticator defines the common interface for all the live gRPC wire\n// protocols and supported transport security protocols (e.g., TLS, SSL).\ntype TransportAuthenticator interface {\n\t// ClientHandshake does the authentication handshake specified by the corresponding\n\t// authentication protocol on rawConn for clients.\n\tClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, error)\n\t// ServerHandshake does the authentication handshake for servers.\n\tServerHandshake(rawConn net.Conn) (net.Conn, error)\n\t// Info provides the ProtocolInfo of this TransportAuthenticator.\n\tInfo() ProtocolInfo\n\tCredentials\n}\n\n// tlsCreds is the credentials required for authenticating a connection using TLS.\ntype tlsCreds struct {\n\t// TLS configuration\n\tconfig tls.Config\n}\n\nfunc (c *tlsCreds) Info() ProtocolInfo {\n\treturn ProtocolInfo{\n\t\tSecurityProtocol: \"tls\",\n\t\tSecurityVersion:  \"1.2\",\n\t}\n}\n\n// GetRequestMetadata returns nil, nil since TLS credentials does not have\n// metadata.\nfunc (c *tlsCreds) GetRequestMetadata(ctx context.Context) (map[string]string, error) {\n\treturn nil, nil\n}\n\ntype timeoutError struct{}\n\nfunc (timeoutError) Error() string   { return \"credentials: Dial timed out\" }\nfunc 
(timeoutError) Timeout() bool   { return true }\nfunc (timeoutError) Temporary() bool { return true }\n\nfunc (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, err error) {\n\t// borrow some code from tls.DialWithDialer\n\tvar errChannel chan error\n\tif timeout != 0 {\n\t\terrChannel = make(chan error, 2)\n\t\ttime.AfterFunc(timeout, func() {\n\t\t\terrChannel <- timeoutError{}\n\t\t})\n\t}\n\tif c.config.ServerName == \"\" {\n\t\tcolonPos := strings.LastIndex(addr, \":\")\n\t\tif colonPos == -1 {\n\t\t\tcolonPos = len(addr)\n\t\t}\n\t\tc.config.ServerName = addr[:colonPos]\n\t}\n\tconn := tls.Client(rawConn, &c.config)\n\tif timeout == 0 {\n\t\terr = conn.Handshake()\n\t} else {\n\t\tgo func() {\n\t\t\terrChannel <- conn.Handshake()\n\t\t}()\n\t\terr = <-errChannel\n\t}\n\tif err != nil {\n\t\trawConn.Close()\n\t\treturn nil, err\n\t}\n\treturn conn, nil\n}\n\nfunc (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, error) {\n\tconn := tls.Server(rawConn, &c.config)\n\tif err := conn.Handshake(); err != nil {\n\t\trawConn.Close()\n\t\treturn nil, err\n\t}\n\treturn conn, nil\n}\n\n// NewTLS uses c to construct a TransportAuthenticator based on TLS.\nfunc NewTLS(c *tls.Config) TransportAuthenticator {\n\ttc := &tlsCreds{*c}\n\ttc.config.NextProtos = alpnProtoStr\n\treturn tc\n}\n\n// NewClientTLSFromCert constructs a TLS from the input certificate for client.\nfunc NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportAuthenticator {\n\treturn NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp})\n}\n\n// NewClientTLSFromFile constructs a TLS from the input certificate file for client.\nfunc NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, error) {\n\tb, err := ioutil.ReadFile(certFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcp := x509.NewCertPool()\n\tif !cp.AppendCertsFromPEM(b) {\n\t\treturn nil, fmt.Errorf(\"credentials: failed to append 
certificates\")\n\t}\n\treturn NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}), nil\n}\n\n// NewServerTLSFromCert constructs a TLS from the input certificate for server.\nfunc NewServerTLSFromCert(cert *tls.Certificate) TransportAuthenticator {\n\treturn NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})\n}\n\n// NewServerTLSFromFile constructs a TLS from the input certificate file and key\n// file for server.\nfunc NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, error) {\n\tcert, err := tls.LoadX509KeyPair(certFile, keyFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil\n}\n\n// TokenSource supplies credentials from an oauth2.TokenSource.\ntype TokenSource struct {\n\toauth2.TokenSource\n}\n\n// GetRequestMetadata gets the request metadata as a map from a TokenSource.\nfunc (ts TokenSource) GetRequestMetadata(ctx context.Context) (map[string]string, error) {\n\ttoken, err := ts.Token()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn map[string]string{\n\t\t\"authorization\": token.TokenType + \" \" + token.AccessToken,\n\t}, nil\n}\n\n// NewComputeEngine constructs the credentials that fetches access tokens from\n// Google Compute Engine (GCE)'s metadata server. 
It is only valid to use this\n// if your program is running on a GCE instance.\n// TODO(dsymonds): Deprecate and remove this.\nfunc NewComputeEngine() Credentials {\n\treturn TokenSource{google.ComputeTokenSource(\"\")}\n}\n\n// serviceAccount represents credentials via JWT signing key.\ntype serviceAccount struct {\n\tconfig *jwt.Config\n}\n\nfunc (s serviceAccount) GetRequestMetadata(ctx context.Context) (map[string]string, error) {\n\ttoken, err := s.config.TokenSource(ctx).Token()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn map[string]string{\n\t\t\"authorization\": token.TokenType + \" \" + token.AccessToken,\n\t}, nil\n}\n\n// NewServiceAccountFromKey constructs the credentials using the JSON key slice\n// from a Google Developers service account.\nfunc NewServiceAccountFromKey(jsonKey []byte, scope ...string) (Credentials, error) {\n\tconfig, err := google.JWTConfigFromJSON(jsonKey, scope...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn serviceAccount{config: config}, nil\n}\n\n// NewServiceAccountFromFile constructs the credentials using the JSON key file\n// of a Google Developers service account.\nfunc NewServiceAccountFromFile(keyFile string, scope ...string) (Credentials, error) {\n\tjsonKey, err := ioutil.ReadFile(keyFile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"credentials: failed to read the service account key file: %v\", err)\n\t}\n\treturn NewServiceAccountFromKey(jsonKey, scope...)\n}\n\n// NewApplicationDefault returns \"Application Default Credentials\". For more\n// detail, see https://developers.google.com/accounts/docs/application-default-credentials.\nfunc NewApplicationDefault(ctx context.Context, scope ...string) (Credentials, error) {\n\tt, err := google.DefaultTokenSource(ctx, scope...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn TokenSource{t}, nil\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/doc.go",
    "content": "/*\nPackage grpc implements an RPC system called gRPC.\n\nSee https://github.com/grpc/grpc for more information about gRPC.\n*/\npackage grpc\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/examples/route_guide/README.md",
    "content": "# Description\nThe route guide server and client demonstrate how to use grpc go libraries to\nperform unary, client streaming, server streaming and full duplex RPCs.\n\nPlease refer to [Getting Started Guide for Go] (https://github.com/grpc/grpc-common/blob/master/go/gotutorial.md) for more information.\n\nSee the definition of the route guide service in proto/route_guide.proto.\n\n# Run the sample code\nTo compile and run the server, assuming you are in the root of the route_guide\nfolder, i.e., .../examples/route_guide/, simply:\n\n```sh\n$ go run server/server.go\n```\n\nLikewise, to run the client:\n\n```sh\n$ go run client/client.go\n```\n\n# Optional command line flags\nThe server and client both take optional command line flags. For example, the\nclient and server run without TLS by default. To enable TLS:\n\n```sh\n$ go run server/server.go -tls=true\n```\n\nand\n\n```sh\n$ go run client/client.go -tls=true\n```\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/examples/route_guide/client/client.go",
    "content": "/*\n *\n * Copyright 2015, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n// Package main implements a simple gRPC client that demonstrates how to use gRPC-Go libraries\n// to perform unary, client streaming, server streaming and full duplex RPCs.\n//\n// It interacts with the route guide service whose definition can be found in proto/route_guide.proto.\npackage main\n\nimport (\n\t\"flag\"\n\t\"io\"\n\t\"math/rand\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials\"\n\tpb \"google.golang.org/grpc/examples/route_guide/proto\"\n\t\"google.golang.org/grpc/grpclog\"\n)\n\nvar (\n\ttls                = flag.Bool(\"tls\", false, \"Connection uses TLS if true, else plain TCP\")\n\tcaFile             = flag.String(\"ca_file\", \"testdata/ca.pem\", \"The file containning the CA root cert file\")\n\tserverAddr         = flag.String(\"server_addr\", \"127.0.0.1:10000\", \"The server address in the format of host:port\")\n\tserverHostOverride = flag.String(\"server_host_override\", \"x.test.youtube.com\", \"The server name use to verify the hostname returned by TLS handshake\")\n)\n\n// printFeature gets the feature for the given point.\nfunc printFeature(client pb.RouteGuideClient, point *pb.Point) {\n\tgrpclog.Printf(\"Getting feature for point (%d, %d)\", point.Latitude, point.Longitude)\n\tfeature, err := client.GetFeature(context.Background(), point)\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"%v.GetFeatures(_) = _, %v: \", client, 
err)\n\t}\n\tgrpclog.Println(feature)\n}\n\n// printFeatures lists all the features within the given bounding Rectangle.\nfunc printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) {\n\tgrpclog.Printf(\"Looking for features within %v\", rect)\n\tstream, err := client.ListFeatures(context.Background(), rect)\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"%v.ListFeatures(_) = _, %v\", client, err)\n\t}\n\tfor {\n\t\tfeature, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tgrpclog.Fatalf(\"%v.ListFeatures(_) = _, %v\", client, err)\n\t\t}\n\t\tgrpclog.Println(feature)\n\t}\n}\n\n// runRecordRoute sends a sequence of points to server and expects to get a RouteSummary from server.\nfunc runRecordRoute(client pb.RouteGuideClient) {\n\t// Create a random number of random points\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tpointCount := int(r.Int31n(100)) + 2 // Traverse at least two points\n\tvar points []*pb.Point\n\tfor i := 0; i < pointCount; i++ {\n\t\tpoints = append(points, randomPoint(r))\n\t}\n\tgrpclog.Printf(\"Traversing %d points.\", len(points))\n\tstream, err := client.RecordRoute(context.Background())\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"%v.RecordRoute(_) = _, %v\", client, err)\n\t}\n\tfor _, point := range points {\n\t\tif err := stream.Send(point); err != nil {\n\t\t\tgrpclog.Fatalf(\"%v.Send(%v) = %v\", stream, point, err)\n\t\t}\n\t}\n\treply, err := stream.CloseAndRecv()\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"%v.CloseAndRecv() got error %v, want %v\", stream, err, nil)\n\t}\n\tgrpclog.Printf(\"Route summary: %v\", reply)\n}\n\n// runRouteChat receives a sequence of route notes, while sending notes for various locations.\nfunc runRouteChat(client pb.RouteGuideClient) {\n\tnotes := []*pb.RouteNote{\n\t\t{&pb.Point{0, 1}, \"First message\"},\n\t\t{&pb.Point{0, 2}, \"Second message\"},\n\t\t{&pb.Point{0, 3}, \"Third message\"},\n\t\t{&pb.Point{0, 1}, \"Fourth message\"},\n\t\t{&pb.Point{0, 
2}, \"Fifth message\"},\n\t\t{&pb.Point{0, 3}, \"Sixth message\"},\n\t}\n\tstream, err := client.RouteChat(context.Background())\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"%v.RouteChat(_) = _, %v\", client, err)\n\t}\n\twaitc := make(chan struct{})\n\tgo func() {\n\t\tfor {\n\t\t\tin, err := stream.Recv()\n\t\t\tif err == io.EOF {\n\t\t\t\t// read done.\n\t\t\t\tclose(waitc)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tgrpclog.Fatalf(\"Failed to receive a note : %v\", err)\n\t\t\t}\n\t\t\tgrpclog.Printf(\"Got message %s at point(%d, %d)\", in.Message, in.Location.Latitude, in.Location.Longitude)\n\t\t}\n\t}()\n\tfor _, note := range notes {\n\t\tif err := stream.Send(note); err != nil {\n\t\t\tgrpclog.Fatalf(\"Failed to send a note: %v\", err)\n\t\t}\n\t}\n\tstream.CloseSend()\n\t<-waitc\n}\n\nfunc randomPoint(r *rand.Rand) *pb.Point {\n\tlat := (r.Int31n(180) - 90) * 1e7\n\tlong := (r.Int31n(360) - 180) * 1e7\n\treturn &pb.Point{lat, long}\n}\n\nfunc main() {\n\tflag.Parse()\n\tvar opts []grpc.DialOption\n\tif *tls {\n\t\tvar sn string\n\t\tif *serverHostOverride != \"\" {\n\t\t\tsn = *serverHostOverride\n\t\t}\n\t\tvar creds credentials.TransportAuthenticator\n\t\tif *caFile != \"\" {\n\t\t\tvar err error\n\t\t\tcreds, err = credentials.NewClientTLSFromFile(*caFile, sn)\n\t\t\tif err != nil {\n\t\t\t\tgrpclog.Fatalf(\"Failed to create TLS credentials %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tcreds = credentials.NewClientTLSFromCert(nil, sn)\n\t\t}\n\t\topts = append(opts, grpc.WithTransportCredentials(creds))\n\t}\n\tconn, err := grpc.Dial(*serverAddr, opts...)\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"fail to dial: %v\", err)\n\t}\n\tdefer conn.Close()\n\tclient := pb.NewRouteGuideClient(conn)\n\n\t// Looking for a valid feature\n\tprintFeature(client, &pb.Point{409146138, -746188906})\n\n\t// Feature missing.\n\tprintFeature(client, &pb.Point{0, 0})\n\n\t// Looking for features between 40, -75 and 42, -73.\n\tprintFeatures(client, 
&pb.Rectangle{&pb.Point{400000000, -750000000}, &pb.Point{420000000, -730000000}})\n\n\t// RecordRoute\n\trunRecordRoute(client)\n\n\t// RouteChat\n\trunRouteChat(client)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/examples/route_guide/proto/route_guide.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: route_guide.proto\n// DO NOT EDIT!\n\n/*\nPackage proto is a generated protocol buffer package.\n\nIt is generated from these files:\n\troute_guide.proto\n\nIt has these top-level messages:\n\tPoint\n\tRectangle\n\tFeature\n\tRouteNote\n\tRouteSummary\n*/\npackage proto\n\nimport proto1 \"github.com/golang/protobuf/proto\"\n\nimport (\n\tcontext \"golang.org/x/net/context\"\n\tgrpc \"google.golang.org/grpc\"\n)\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ context.Context\nvar _ grpc.ClientConn\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto1.Marshal\n\n// Points are represented as latitude-longitude pairs in the E7 representation\n// (degrees multiplied by 10**7 and rounded to the nearest integer).\n// Latitudes should be in the range +/- 90 degrees and longitude should be in\n// the range +/- 180 degrees (inclusive).\ntype Point struct {\n\tLatitude  int32 `protobuf:\"varint,1,opt,name=latitude\" json:\"latitude,omitempty\"`\n\tLongitude int32 `protobuf:\"varint,2,opt,name=longitude\" json:\"longitude,omitempty\"`\n}\n\nfunc (m *Point) Reset()         { *m = Point{} }\nfunc (m *Point) String() string { return proto1.CompactTextString(m) }\nfunc (*Point) ProtoMessage()    {}\n\n// A latitude-longitude rectangle, represented as two diagonally opposite\n// points \"lo\" and \"hi\".\ntype Rectangle struct {\n\t// One corner of the rectangle.\n\tLo *Point `protobuf:\"bytes,1,opt,name=lo\" json:\"lo,omitempty\"`\n\t// The other corner of the rectangle.\n\tHi *Point `protobuf:\"bytes,2,opt,name=hi\" json:\"hi,omitempty\"`\n}\n\nfunc (m *Rectangle) Reset()         { *m = Rectangle{} }\nfunc (m *Rectangle) String() string { return proto1.CompactTextString(m) }\nfunc (*Rectangle) ProtoMessage()    {}\n\nfunc (m *Rectangle) GetLo() *Point {\n\tif m != nil {\n\t\treturn m.Lo\n\t}\n\treturn nil\n}\n\nfunc (m *Rectangle) GetHi() *Point 
{\n\tif m != nil {\n\t\treturn m.Hi\n\t}\n\treturn nil\n}\n\n// A feature names something at a given point.\n//\n// If a feature could not be named, the name is empty.\ntype Feature struct {\n\t// The name of the feature.\n\tName string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\t// The point where the feature is detected.\n\tLocation *Point `protobuf:\"bytes,2,opt,name=location\" json:\"location,omitempty\"`\n}\n\nfunc (m *Feature) Reset()         { *m = Feature{} }\nfunc (m *Feature) String() string { return proto1.CompactTextString(m) }\nfunc (*Feature) ProtoMessage()    {}\n\nfunc (m *Feature) GetLocation() *Point {\n\tif m != nil {\n\t\treturn m.Location\n\t}\n\treturn nil\n}\n\n// A RouteNote is a message sent while at a given point.\ntype RouteNote struct {\n\t// The location from which the message is sent.\n\tLocation *Point `protobuf:\"bytes,1,opt,name=location\" json:\"location,omitempty\"`\n\t// The message to be sent.\n\tMessage string `protobuf:\"bytes,2,opt,name=message\" json:\"message,omitempty\"`\n}\n\nfunc (m *RouteNote) Reset()         { *m = RouteNote{} }\nfunc (m *RouteNote) String() string { return proto1.CompactTextString(m) }\nfunc (*RouteNote) ProtoMessage()    {}\n\nfunc (m *RouteNote) GetLocation() *Point {\n\tif m != nil {\n\t\treturn m.Location\n\t}\n\treturn nil\n}\n\n// A RouteSummary is received in response to a RecordRoute rpc.\n//\n// It contains the number of individual points received, the number of\n// detected features, and the total distance covered as the cumulative sum of\n// the distance between each point.\ntype RouteSummary struct {\n\t// The number of points received.\n\tPointCount int32 `protobuf:\"varint,1,opt,name=point_count\" json:\"point_count,omitempty\"`\n\t// The number of known features passed while traversing the route.\n\tFeatureCount int32 `protobuf:\"varint,2,opt,name=feature_count\" json:\"feature_count,omitempty\"`\n\t// The distance covered in metres.\n\tDistance int32 
`protobuf:\"varint,3,opt,name=distance\" json:\"distance,omitempty\"`\n\t// The duration of the traversal in seconds.\n\tElapsedTime int32 `protobuf:\"varint,4,opt,name=elapsed_time\" json:\"elapsed_time,omitempty\"`\n}\n\nfunc (m *RouteSummary) Reset()         { *m = RouteSummary{} }\nfunc (m *RouteSummary) String() string { return proto1.CompactTextString(m) }\nfunc (*RouteSummary) ProtoMessage()    {}\n\nfunc init() {\n}\n\n// Client API for RouteGuide service\n\ntype RouteGuideClient interface {\n\t// A simple RPC.\n\t//\n\t// Obtains the feature at a given position.\n\t//\n\t// If no feature is found for the given point, a feature with an empty name\n\t// should be returned.\n\tGetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error)\n\t// A server-to-client streaming RPC.\n\t//\n\t// Obtains the Features available within the given Rectangle.  Results are\n\t// streamed rather than returned at once (e.g. in a response message with a\n\t// repeated field), as the rectangle may cover a large area and contain a\n\t// huge number of features.\n\tListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error)\n\t// A client-to-server streaming RPC.\n\t//\n\t// Accepts a stream of Points on a route being traversed, returning a\n\t// RouteSummary when traversal is completed.\n\tRecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error)\n\t// A Bidirectional streaming RPC.\n\t//\n\t// Accepts a stream of RouteNotes sent while a route is being traversed,\n\t// while receiving other RouteNotes (e.g. 
from other users).\n\tRouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error)\n}\n\ntype routeGuideClient struct {\n\tcc *grpc.ClientConn\n}\n\nfunc NewRouteGuideClient(cc *grpc.ClientConn) RouteGuideClient {\n\treturn &routeGuideClient{cc}\n}\n\nfunc (c *routeGuideClient) GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) {\n\tout := new(Feature)\n\terr := grpc.Invoke(ctx, \"/proto.RouteGuide/GetFeature\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *routeGuideClient) ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[0], c.cc, \"/proto.RouteGuide/ListFeatures\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &routeGuideListFeaturesClient{stream}\n\tif err := x.ClientStream.SendMsg(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := x.ClientStream.CloseSend(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn x, nil\n}\n\ntype RouteGuide_ListFeaturesClient interface {\n\tRecv() (*Feature, error)\n\tgrpc.ClientStream\n}\n\ntype routeGuideListFeaturesClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *routeGuideListFeaturesClient) Recv() (*Feature, error) {\n\tm := new(Feature)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc (c *routeGuideClient) RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[1], c.cc, \"/proto.RouteGuide/RecordRoute\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &routeGuideRecordRouteClient{stream}\n\treturn x, nil\n}\n\ntype RouteGuide_RecordRouteClient interface {\n\tSend(*Point) error\n\tCloseAndRecv() (*RouteSummary, error)\n\tgrpc.ClientStream\n}\n\ntype 
routeGuideRecordRouteClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *routeGuideRecordRouteClient) Send(m *Point) error {\n\treturn x.ClientStream.SendMsg(m)\n}\n\nfunc (x *routeGuideRecordRouteClient) CloseAndRecv() (*RouteSummary, error) {\n\tif err := x.ClientStream.CloseSend(); err != nil {\n\t\treturn nil, err\n\t}\n\tm := new(RouteSummary)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc (c *routeGuideClient) RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[2], c.cc, \"/proto.RouteGuide/RouteChat\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &routeGuideRouteChatClient{stream}\n\treturn x, nil\n}\n\ntype RouteGuide_RouteChatClient interface {\n\tSend(*RouteNote) error\n\tRecv() (*RouteNote, error)\n\tgrpc.ClientStream\n}\n\ntype routeGuideRouteChatClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *routeGuideRouteChatClient) Send(m *RouteNote) error {\n\treturn x.ClientStream.SendMsg(m)\n}\n\nfunc (x *routeGuideRouteChatClient) Recv() (*RouteNote, error) {\n\tm := new(RouteNote)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n// Server API for RouteGuide service\n\ntype RouteGuideServer interface {\n\t// A simple RPC.\n\t//\n\t// Obtains the feature at a given position.\n\t//\n\t// If no feature is found for the given point, a feature with an empty name\n\t// should be returned.\n\tGetFeature(context.Context, *Point) (*Feature, error)\n\t// A server-to-client streaming RPC.\n\t//\n\t// Obtains the Features available within the given Rectangle.  Results are\n\t// streamed rather than returned at once (e.g. 
in a response message with a\n\t// repeated field), as the rectangle may cover a large area and contain a\n\t// huge number of features.\n\tListFeatures(*Rectangle, RouteGuide_ListFeaturesServer) error\n\t// A client-to-server streaming RPC.\n\t//\n\t// Accepts a stream of Points on a route being traversed, returning a\n\t// RouteSummary when traversal is completed.\n\tRecordRoute(RouteGuide_RecordRouteServer) error\n\t// A Bidirectional streaming RPC.\n\t//\n\t// Accepts a stream of RouteNotes sent while a route is being traversed,\n\t// while receiving other RouteNotes (e.g. from other users).\n\tRouteChat(RouteGuide_RouteChatServer) error\n}\n\nfunc RegisterRouteGuideServer(s *grpc.Server, srv RouteGuideServer) {\n\ts.RegisterService(&_RouteGuide_serviceDesc, srv)\n}\n\nfunc _RouteGuide_GetFeature_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(Point)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(RouteGuideServer).GetFeature(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _RouteGuide_ListFeatures_Handler(srv interface{}, stream grpc.ServerStream) error {\n\tm := new(Rectangle)\n\tif err := stream.RecvMsg(m); err != nil {\n\t\treturn err\n\t}\n\treturn srv.(RouteGuideServer).ListFeatures(m, &routeGuideListFeaturesServer{stream})\n}\n\ntype RouteGuide_ListFeaturesServer interface {\n\tSend(*Feature) error\n\tgrpc.ServerStream\n}\n\ntype routeGuideListFeaturesServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *routeGuideListFeaturesServer) Send(m *Feature) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc _RouteGuide_RecordRoute_Handler(srv interface{}, stream grpc.ServerStream) error {\n\treturn srv.(RouteGuideServer).RecordRoute(&routeGuideRecordRouteServer{stream})\n}\n\ntype RouteGuide_RecordRouteServer interface {\n\tSendAndClose(*RouteSummary) error\n\tRecv() (*Point, 
error)\n\tgrpc.ServerStream\n}\n\ntype routeGuideRecordRouteServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *routeGuideRecordRouteServer) SendAndClose(m *RouteSummary) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc (x *routeGuideRecordRouteServer) Recv() (*Point, error) {\n\tm := new(Point)\n\tif err := x.ServerStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc _RouteGuide_RouteChat_Handler(srv interface{}, stream grpc.ServerStream) error {\n\treturn srv.(RouteGuideServer).RouteChat(&routeGuideRouteChatServer{stream})\n}\n\ntype RouteGuide_RouteChatServer interface {\n\tSend(*RouteNote) error\n\tRecv() (*RouteNote, error)\n\tgrpc.ServerStream\n}\n\ntype routeGuideRouteChatServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *routeGuideRouteChatServer) Send(m *RouteNote) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc (x *routeGuideRouteChatServer) Recv() (*RouteNote, error) {\n\tm := new(RouteNote)\n\tif err := x.ServerStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nvar _RouteGuide_serviceDesc = grpc.ServiceDesc{\n\tServiceName: \"proto.RouteGuide\",\n\tHandlerType: (*RouteGuideServer)(nil),\n\tMethods: []grpc.MethodDesc{\n\t\t{\n\t\t\tMethodName: \"GetFeature\",\n\t\t\tHandler:    _RouteGuide_GetFeature_Handler,\n\t\t},\n\t},\n\tStreams: []grpc.StreamDesc{\n\t\t{\n\t\t\tStreamName:    \"ListFeatures\",\n\t\t\tHandler:       _RouteGuide_ListFeatures_Handler,\n\t\t\tServerStreams: true,\n\t\t},\n\t\t{\n\t\t\tStreamName:    \"RecordRoute\",\n\t\t\tHandler:       _RouteGuide_RecordRoute_Handler,\n\t\t\tClientStreams: true,\n\t\t},\n\t\t{\n\t\t\tStreamName:    \"RouteChat\",\n\t\t\tHandler:       _RouteGuide_RouteChat_Handler,\n\t\t\tServerStreams: true,\n\t\t\tClientStreams: true,\n\t\t},\n\t},\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/examples/route_guide/proto/route_guide.proto",
    "content": "// Copyright 2015, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage proto;\n\n// Interface exported by the server.\nservice RouteGuide {\n  // A simple RPC.\n  //\n  // Obtains the feature at a given position.\n  //\n  // If no feature is found for the given point, a feature with an empty name\n  // should be returned.\n  rpc GetFeature(Point) returns (Feature) {}\n\n  // A server-to-client streaming RPC.\n  //\n  // Obtains the Features available within the given Rectangle.  Results are\n  // streamed rather than returned at once (e.g. in a response message with a\n  // repeated field), as the rectangle may cover a large area and contain a\n  // huge number of features.\n  rpc ListFeatures(Rectangle) returns (stream Feature) {}\n\n  // A client-to-server streaming RPC.\n  //\n  // Accepts a stream of Points on a route being traversed, returning a\n  // RouteSummary when traversal is completed.\n  rpc RecordRoute(stream Point) returns (RouteSummary) {}\n\n  // A Bidirectional streaming RPC.\n  //\n  // Accepts a stream of RouteNotes sent while a route is being traversed,\n  // while receiving other RouteNotes (e.g. 
from other users).\n  rpc RouteChat(stream RouteNote) returns (stream RouteNote) {}\n}\n\n// Points are represented as latitude-longitude pairs in the E7 representation\n// (degrees multiplied by 10**7 and rounded to the nearest integer).\n// Latitudes should be in the range +/- 90 degrees and longitude should be in\n// the range +/- 180 degrees (inclusive).\nmessage Point {\n  int32 latitude = 1;\n  int32 longitude = 2;\n}\n\n// A latitude-longitude rectangle, represented as two diagonally opposite\n// points \"lo\" and \"hi\".\nmessage Rectangle {\n  // One corner of the rectangle.\n  Point lo = 1;\n\n  // The other corner of the rectangle.\n  Point hi = 2;\n}\n\n// A feature names something at a given point.\n//\n// If a feature could not be named, the name is empty.\nmessage Feature {\n  // The name of the feature.\n  string name = 1;\n\n  // The point where the feature is detected.\n  Point location = 2;\n}\n\n// A RouteNote is a message sent while at a given point.\nmessage RouteNote {\n  // The location from which the message is sent.\n  Point location = 1;\n\n  // The message to be sent.\n  string message = 2;\n}\n\n// A RouteSummary is received in response to a RecordRoute rpc.\n//\n// It contains the number of individual points received, the number of\n// detected features, and the total distance covered as the cumulative sum of\n// the distance between each point.\nmessage RouteSummary {\n  // The number of points received.\n  int32 point_count = 1;\n\n  // The number of known features passed while traversing the route.\n  int32 feature_count = 2;\n\n  // The distance covered in metres.\n  int32 distance = 3;\n\n  // The duration of the traversal in seconds.\n  int32 elapsed_time = 4;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/examples/route_guide/server/server.go",
    "content": "/*\n *\n * Copyright 2015, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n// Package main implements a simple gRPC server that demonstrates how to use gRPC-Go libraries\n// to perform unary, client streaming, server streaming and full duplex RPCs.\n//\n// It implements the route guide service whose definition can be found in proto/route_guide.proto.\npackage main\n\nimport (\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"math\"\n\t\"net\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/grpc\"\n\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/grpc/grpclog\"\n\n\tproto \"github.com/golang/protobuf/proto\"\n\n\tpb \"google.golang.org/grpc/examples/route_guide/proto\"\n)\n\nvar (\n\ttls        = flag.Bool(\"tls\", false, \"Connection uses TLS if true, else plain TCP\")\n\tcertFile   = flag.String(\"cert_file\", \"testdata/server1.pem\", \"The TLS cert file\")\n\tkeyFile    = flag.String(\"key_file\", \"testdata/server1.key\", \"The TLS key file\")\n\tjsonDBFile = flag.String(\"json_db_file\", \"testdata/route_guide_db.json\", \"A json file containing a list of features\")\n\tport       = flag.Int(\"port\", 10000, \"The server port\")\n)\n\ntype routeGuideServer struct {\n\tsavedFeatures []*pb.Feature\n\trouteNotes    map[string][]*pb.RouteNote\n}\n\n// GetFeature returns the feature at the given point.\nfunc (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) {\n\tfor _, feature := range 
s.savedFeatures {\n\t\tif proto.Equal(feature.Location, point) {\n\t\t\treturn feature, nil\n\t\t}\n\t}\n\t// No feature was found, return an unnamed feature\n\treturn &pb.Feature{\"\", point}, nil\n}\n\n// ListFeatures lists all features comtained within the given bounding Rectangle.\nfunc (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error {\n\tfor _, feature := range s.savedFeatures {\n\t\tif inRange(feature.Location, rect) {\n\t\t\tif err := stream.Send(feature); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// RecordRoute records a route composited of a sequence of points.\n//\n// It gets a stream of points, and responds with statistics about the \"trip\":\n// number of points,  number of known features visited, total distance traveled, and\n// total time spent.\nfunc (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error {\n\tvar pointCount, featureCount, distance int32\n\tvar lastPoint *pb.Point\n\tstartTime := time.Now()\n\tfor {\n\t\tpoint, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\tendTime := time.Now()\n\t\t\treturn stream.SendAndClose(&pb.RouteSummary{\n\t\t\t\tPointCount:   pointCount,\n\t\t\t\tFeatureCount: featureCount,\n\t\t\t\tDistance:     distance,\n\t\t\t\tElapsedTime:  int32(endTime.Sub(startTime).Seconds()),\n\t\t\t})\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpointCount++\n\t\tfor _, feature := range s.savedFeatures {\n\t\t\tif proto.Equal(feature.Location, point) {\n\t\t\t\tfeatureCount++\n\t\t\t}\n\t\t}\n\t\tif lastPoint != nil {\n\t\t\tdistance += calcDistance(lastPoint, point)\n\t\t}\n\t\tlastPoint = point\n\t}\n}\n\n// RouteChat receives a stream of message/location pairs, and responds with a stream of all\n// previous messages at each of those locations.\nfunc (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error {\n\tfor {\n\t\tin, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\treturn 
nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tkey := serialize(in.Location)\n\t\tif _, present := s.routeNotes[key]; !present {\n\t\t\ts.routeNotes[key] = []*pb.RouteNote{in}\n\t\t} else {\n\t\t\ts.routeNotes[key] = append(s.routeNotes[key], in)\n\t\t}\n\t\tfor _, note := range s.routeNotes[key] {\n\t\t\tif err := stream.Send(note); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n}\n\n// loadFeatures loads features from a JSON file.\nfunc (s *routeGuideServer) loadFeatures(filePath string) {\n\tfile, err := ioutil.ReadFile(filePath)\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"Failed to load default features: %v\", err)\n\t}\n\tif err := json.Unmarshal(file, &s.savedFeatures); err != nil {\n\t\tgrpclog.Fatalf(\"Failed to load default features: %v\", err)\n\t}\n}\n\nfunc toRadians(num float64) float64 {\n\treturn num * math.Pi / float64(180)\n}\n\n// calcDistance calculates the distance between two points using the \"haversine\" formula.\n// This code was taken from http://www.movable-type.co.uk/scripts/latlong.html.\nfunc calcDistance(p1 *pb.Point, p2 *pb.Point) int32 {\n\tconst CordFactor float64 = 1e7\n\tconst R float64 = float64(6371000) // metres\n\tlat1 := float64(p1.Latitude) / CordFactor\n\tlat2 := float64(p2.Latitude) / CordFactor\n\tlng1 := float64(p1.Longitude) / CordFactor\n\tlng2 := float64(p2.Longitude) / CordFactor\n\tφ1 := toRadians(lat1)\n\tφ2 := toRadians(lat2)\n\tΔφ := toRadians(lat2 - lat1)\n\tΔλ := toRadians(lng2 - lng1)\n\n\ta := math.Sin(Δφ/2)*math.Sin(Δφ/2) +\n\t\tmath.Cos(φ1)*math.Cos(φ2)*\n\t\t\tmath.Sin(Δλ/2)*math.Sin(Δλ/2)\n\tc := 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))\n\n\tdistance := R * c\n\treturn int32(distance)\n}\n\nfunc inRange(point *pb.Point, rect *pb.Rectangle) bool {\n\tleft := math.Min(float64(rect.Lo.Longitude), float64(rect.Hi.Longitude))\n\tright := math.Max(float64(rect.Lo.Longitude), float64(rect.Hi.Longitude))\n\ttop := math.Max(float64(rect.Lo.Latitude), float64(rect.Hi.Latitude))\n\tbottom := 
math.Min(float64(rect.Lo.Latitude), float64(rect.Hi.Latitude))\n\n\tif float64(point.Longitude) >= left &&\n\t\tfloat64(point.Longitude) <= right &&\n\t\tfloat64(point.Latitude) >= bottom &&\n\t\tfloat64(point.Latitude) <= top {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc serialize(point *pb.Point) string {\n\treturn fmt.Sprintf(\"%d %d\", point.Latitude, point.Longitude)\n}\n\nfunc newServer() *routeGuideServer {\n\ts := new(routeGuideServer)\n\ts.loadFeatures(*jsonDBFile)\n\ts.routeNotes = make(map[string][]*pb.RouteNote)\n\treturn s\n}\n\nfunc main() {\n\tflag.Parse()\n\tlis, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", *port))\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\tvar opts []grpc.ServerOption\n\tif *tls {\n\t\tcreds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile)\n\t\tif err != nil {\n\t\t\tgrpclog.Fatalf(\"Failed to generate credentials %v\", err)\n\t\t}\n\t\topts = []grpc.ServerOption{grpc.Creds(creds)}\n\t}\n\tgrpcServer := grpc.NewServer(opts...)\n\tpb.RegisterRouteGuideServer(grpcServer, newServer())\n\tgrpcServer.Serve(lis)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/grpc-auth-support.md",
    "content": "# Authentication\n\nAs outlined <a href=\"https://github.com/grpc/grpc-common/blob/master/grpc-auth-support.md\">here</a> gRPC supports a number of different mechanisms for asserting identity between an client and server. We'll present some code-samples here demonstrating how to provide TLS support encryption and identity assertions as well as passing OAuth2 tokens to services that support it.\n\n# Enabling TLS on a gRPC client\n\n```Go\nconn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, \"\"))\n```\n\n# Enabling TLS on a gRPC server\n\n```Go\ncreds, err := credentials.NewServerTLSFromFile(certFile, keyFile)\nif err != nil {\n  log.Fatalf(\"Failed to generate credentials %v\", err)\n}\nlis, err := net.Listen(\"tcp\", \":0\")\nserver := grpc.NewServer(grpc.Creds(creds))\n...\nserver.Serve(lis)\n```\n\n# Authenticating with Google\n\n## Google Compute Engine (GCE)\n\n```Go\nconn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, \"\"), grpc.WithPerRPCCredentials(credentials.NewComputeEngine())))\n```\n\n## JWT\n\n```Go\njwtCreds, err := credentials.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope)\nif err != nil {\n  log.Fatalf(\"Failed to create JWT credentials: %v\", err)\n}\nconn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, \"\"), grpc.WithPerRPCCredentials(jwtCreds)))\n```\n\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/grpclog/logger.go",
    "content": "/*\n *\n * Copyright 2015, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n/*\nPackage log defines logging for grpc.\n*/\npackage grpclog\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\t\"github.com/golang/glog\"\n)\n\nvar (\n\t// GLogger is a Logger that uses glog. 
This is the default logger.\n\tGLogger Logger = &glogger{}\n\n\t// StdLogger is a Logger that uses golang's standard logger.\n\tStdLogger Logger = log.New(os.Stderr, \"\", log.LstdFlags)\n\n\tlogger = GLogger\n)\n\n// Logger mimics golang's standard Logger as an interface.\ntype Logger interface {\n\tFatal(args ...interface{})\n\tFatalf(format string, args ...interface{})\n\tFatalln(args ...interface{})\n\tPrint(args ...interface{})\n\tPrintf(format string, args ...interface{})\n\tPrintln(args ...interface{})\n}\n\n// SetLogger sets the logger that is used in grpc.\nfunc SetLogger(l Logger) {\n\tlogger = l\n}\n\n// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code.\nfunc Fatal(args ...interface{}) {\n\tlogger.Fatal(args...)\n}\n\n// Fatal is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code.\nfunc Fatalf(format string, args ...interface{}) {\n\tlogger.Fatalf(format, args...)\n}\n\n// Fatal is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code.\nfunc Fatalln(args ...interface{}) {\n\tlogger.Fatalln(args...)\n}\n\n// Print prints to the logger. Arguments are handled in the manner of fmt.Print.\nfunc Print(args ...interface{}) {\n\tlogger.Print(args...)\n}\n\n// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.\nfunc Printf(format string, args ...interface{}) {\n\tlogger.Printf(format, args...)\n}\n\n// Println prints to the logger. 
Arguments are handled in the manner of fmt.Println.\nfunc Println(args ...interface{}) {\n\tlogger.Println(args...)\n}\n\ntype glogger struct{}\n\nfunc (g *glogger) Fatal(args ...interface{}) {\n\tglog.Fatal(args...)\n}\n\nfunc (g *glogger) Fatalf(format string, args ...interface{}) {\n\tglog.Fatalf(format, args...)\n}\n\nfunc (g *glogger) Fatalln(args ...interface{}) {\n\tglog.Fatalln(args...)\n}\n\nfunc (g *glogger) Print(args ...interface{}) {\n\tglog.Info(args...)\n}\n\nfunc (g *glogger) Printf(format string, args ...interface{}) {\n\tglog.Infof(format, args...)\n}\n\nfunc (g *glogger) Println(args ...interface{}) {\n\tglog.Infoln(args...)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/interop/client/client.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/grpc/grpclog\"\n\ttestpb \"google.golang.org/grpc/interop/grpc_testing\"\n\t\"google.golang.org/grpc/metadata\"\n)\n\nvar (\n\tuseTLS                = flag.Bool(\"use_tls\", false, \"Connection uses TLS if true, else plain TCP\")\n\tcaFile                = flag.String(\"tls_ca_file\", \"testdata/ca.pem\", \"The file containning the CA root cert file\")\n\tserviceAccountKeyFile = flag.String(\"service_account_key_file\", \"\", \"Path to service account json key file\")\n\toauthScope            = flag.String(\"oauth_scope\", \"\", \"The scope for OAuth2 tokens\")\n\tdefaultServiceAccount = flag.String(\"default_service_account\", \"\", \"Email of GCE default service account\")\n\tserverHost            = flag.String(\"server_host\", \"127.0.0.1\", \"The server host name\")\n\tserverPort            = flag.Int(\"server_port\", 10000, \"The server port number\")\n\ttlsServerName         = flag.String(\"server_host_override\", \"x.test.youtube.com\", \"The server name use to verify the hostname returned by TLS handshake if it is not empty. 
Otherwise, --server_host is used.\")\n\ttestCase              = flag.String(\"test_case\", \"large_unary\",\n\t\t`Configure different test cases. Valid options are:\n        empty_unary : empty (zero bytes) request and response;\n        large_unary : single request and (large) response;\n        client_streaming : request streaming with single response;\n        server_streaming : single request with response streaming;\n        ping_pong : full-duplex streaming;\n        compute_engine_creds: large_unary with compute engine auth;\n\tservice_account_creds: large_unary with service account auth;\n\tcancel_after_begin: cancellation after metadata has been sent but before payloads are sent;\n\tcancel_after_first_response: cancellation after receiving 1st message from the server.`)\n)\n\nvar (\n\treqSizes      = []int{27182, 8, 1828, 45904}\n\trespSizes     = []int{31415, 9, 2653, 58979}\n\tlargeReqSize  = 271828\n\tlargeRespSize = 314159\n)\n\nfunc newPayload(t testpb.PayloadType, size int) *testpb.Payload {\n\tif size < 0 {\n\t\tgrpclog.Fatalf(\"Requested a response with invalid length %d\", size)\n\t}\n\tbody := make([]byte, size)\n\tswitch t {\n\tcase testpb.PayloadType_COMPRESSABLE:\n\tcase testpb.PayloadType_UNCOMPRESSABLE:\n\t\tgrpclog.Fatalf(\"PayloadType UNCOMPRESSABLE is not supported\")\n\tdefault:\n\t\tgrpclog.Fatalf(\"Unsupported payload type: %d\", t)\n\t}\n\treturn &testpb.Payload{\n\t\tType: t.Enum(),\n\t\tBody: body,\n\t}\n}\n\nfunc doEmptyUnaryCall(tc testpb.TestServiceClient) {\n\treply, err := tc.EmptyCall(context.Background(), &testpb.Empty{})\n\tif err != nil {\n\t\tgrpclog.Fatal(\"/TestService/EmptyCall RPC failed: \", err)\n\t}\n\tif !proto.Equal(&testpb.Empty{}, reply) {\n\t\tgrpclog.Fatalf(\"/TestService/EmptyCall receives %v, want %v\", reply, testpb.Empty{})\n\t}\n\tgrpclog.Println(\"EmptyUnaryCall done\")\n}\n\nfunc doLargeUnaryCall(tc testpb.TestServiceClient) {\n\tpl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)\n\treq := 
&testpb.SimpleRequest{\n\t\tResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),\n\t\tResponseSize: proto.Int32(int32(largeRespSize)),\n\t\tPayload:      pl,\n\t}\n\treply, err := tc.UnaryCall(context.Background(), req)\n\tif err != nil {\n\t\tgrpclog.Fatal(\"/TestService/UnaryCall RPC failed: \", err)\n\t}\n\tt := reply.GetPayload().GetType()\n\ts := len(reply.GetPayload().GetBody())\n\tif t != testpb.PayloadType_COMPRESSABLE || s != largeRespSize {\n\t\tgrpclog.Fatalf(\"Got the reply with type %d len %d; want %d, %d\", t, s, testpb.PayloadType_COMPRESSABLE, largeRespSize)\n\t}\n\tgrpclog.Println(\"LargeUnaryCall done\")\n}\n\nfunc doClientStreaming(tc testpb.TestServiceClient) {\n\tstream, err := tc.StreamingInputCall(context.Background())\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"%v.StreamingInputCall(_) = _, %v\", tc, err)\n\t}\n\tvar sum int\n\tfor _, s := range reqSizes {\n\t\tpl := newPayload(testpb.PayloadType_COMPRESSABLE, s)\n\t\treq := &testpb.StreamingInputCallRequest{\n\t\t\tPayload: pl,\n\t\t}\n\t\tif err := stream.Send(req); err != nil {\n\t\t\tgrpclog.Fatalf(\"%v.Send(%v) = %v\", stream, req, err)\n\t\t}\n\t\tsum += s\n\t\tgrpclog.Printf(\"Sent a request of size %d, aggregated size %d\", s, sum)\n\n\t}\n\treply, err := stream.CloseAndRecv()\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"%v.CloseAndRecv() got error %v, want %v\", stream, err, nil)\n\t}\n\tif reply.GetAggregatedPayloadSize() != int32(sum) {\n\t\tgrpclog.Fatalf(\"%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v\", stream, reply.GetAggregatedPayloadSize(), sum)\n\t}\n\tgrpclog.Println(\"ClientStreaming done\")\n}\n\nfunc doServerStreaming(tc testpb.TestServiceClient) {\n\trespParam := make([]*testpb.ResponseParameters, len(respSizes))\n\tfor i, s := range respSizes {\n\t\trespParam[i] = &testpb.ResponseParameters{\n\t\t\tSize: proto.Int32(int32(s)),\n\t\t}\n\t}\n\treq := &testpb.StreamingOutputCallRequest{\n\t\tResponseType:       
testpb.PayloadType_COMPRESSABLE.Enum(),\n\t\tResponseParameters: respParam,\n\t}\n\tstream, err := tc.StreamingOutputCall(context.Background(), req)\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"%v.StreamingOutputCall(_) = _, %v\", tc, err)\n\t}\n\tvar rpcStatus error\n\tvar respCnt int\n\tvar index int\n\tfor {\n\t\treply, err := stream.Recv()\n\t\tif err != nil {\n\t\t\trpcStatus = err\n\t\t\tbreak\n\t\t}\n\t\tt := reply.GetPayload().GetType()\n\t\tif t != testpb.PayloadType_COMPRESSABLE {\n\t\t\tgrpclog.Fatalf(\"Got the reply of type %d, want %d\", t, testpb.PayloadType_COMPRESSABLE)\n\t\t}\n\t\tsize := len(reply.GetPayload().GetBody())\n\t\tif size != int(respSizes[index]) {\n\t\t\tgrpclog.Fatalf(\"Got reply body of length %d, want %d\", size, respSizes[index])\n\t\t}\n\t\tindex++\n\t\trespCnt++\n\t}\n\tif rpcStatus != io.EOF {\n\t\tgrpclog.Fatalf(\"Failed to finish the server streaming rpc: %v\", err)\n\t}\n\tif respCnt != len(respSizes) {\n\t\tgrpclog.Fatalf(\"Got %d reply, want %d\", len(respSizes), respCnt)\n\t}\n\tgrpclog.Println(\"ServerStreaming done\")\n}\n\nfunc doPingPong(tc testpb.TestServiceClient) {\n\tstream, err := tc.FullDuplexCall(context.Background())\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"%v.FullDuplexCall(_) = _, %v\", tc, err)\n\t}\n\tvar index int\n\tfor index < len(reqSizes) {\n\t\trespParam := []*testpb.ResponseParameters{\n\t\t\t{\n\t\t\t\tSize: proto.Int32(int32(respSizes[index])),\n\t\t\t},\n\t\t}\n\t\tpl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSizes[index])\n\t\treq := &testpb.StreamingOutputCallRequest{\n\t\t\tResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),\n\t\t\tResponseParameters: respParam,\n\t\t\tPayload:            pl,\n\t\t}\n\t\tif err := stream.Send(req); err != nil {\n\t\t\tgrpclog.Fatalf(\"%v.Send(%v) = %v\", stream, req, err)\n\t\t}\n\t\treply, err := stream.Recv()\n\t\tif err != nil {\n\t\t\tgrpclog.Fatalf(\"%v.Recv() = %v\", stream, err)\n\t\t}\n\t\tt := reply.GetPayload().GetType()\n\t\tif t != 
testpb.PayloadType_COMPRESSABLE {\n\t\t\tgrpclog.Fatalf(\"Got the reply of type %d, want %d\", t, testpb.PayloadType_COMPRESSABLE)\n\t\t}\n\t\tsize := len(reply.GetPayload().GetBody())\n\t\tif size != int(respSizes[index]) {\n\t\t\tgrpclog.Fatalf(\"Got reply body of length %d, want %d\", size, respSizes[index])\n\t\t}\n\t\tindex++\n\t}\n\tif err := stream.CloseSend(); err != nil {\n\t\tgrpclog.Fatalf(\"%v.CloseSend() got %v, want %v\", stream, err, nil)\n\t}\n\tif _, err := stream.Recv(); err != io.EOF {\n\t\tgrpclog.Fatalf(\"%v failed to complele the ping pong test: %v\", stream, err)\n\t}\n\tgrpclog.Println(\"Pingpong done\")\n}\n\nfunc doComputeEngineCreds(tc testpb.TestServiceClient) {\n\tpl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)\n\treq := &testpb.SimpleRequest{\n\t\tResponseType:   testpb.PayloadType_COMPRESSABLE.Enum(),\n\t\tResponseSize:   proto.Int32(int32(largeRespSize)),\n\t\tPayload:        pl,\n\t\tFillUsername:   proto.Bool(true),\n\t\tFillOauthScope: proto.Bool(true),\n\t}\n\treply, err := tc.UnaryCall(context.Background(), req)\n\tif err != nil {\n\t\tgrpclog.Fatal(\"/TestService/UnaryCall RPC failed: \", err)\n\t}\n\tuser := reply.GetUsername()\n\tscope := reply.GetOauthScope()\n\tif user != *defaultServiceAccount {\n\t\tgrpclog.Fatalf(\"Got user name %q, want %q.\", user, *defaultServiceAccount)\n\t}\n\tif !strings.Contains(*oauthScope, scope) {\n\t\tgrpclog.Fatalf(\"Got OAuth scope %q which is NOT a substring of %q.\", scope, *oauthScope)\n\t}\n\tgrpclog.Println(\"ComputeEngineCreds done\")\n}\n\nfunc getServiceAccountJSONKey() []byte {\n\tjsonKey, err := ioutil.ReadFile(*serviceAccountKeyFile)\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"Failed to read the service account key file: %v\", err)\n\t}\n\treturn jsonKey\n}\n\nfunc doServiceAccountCreds(tc testpb.TestServiceClient) {\n\tpl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)\n\treq := &testpb.SimpleRequest{\n\t\tResponseType:   
testpb.PayloadType_COMPRESSABLE.Enum(),\n\t\tResponseSize:   proto.Int32(int32(largeRespSize)),\n\t\tPayload:        pl,\n\t\tFillUsername:   proto.Bool(true),\n\t\tFillOauthScope: proto.Bool(true),\n\t}\n\treply, err := tc.UnaryCall(context.Background(), req)\n\tif err != nil {\n\t\tgrpclog.Fatal(\"/TestService/UnaryCall RPC failed: \", err)\n\t}\n\tjsonKey := getServiceAccountJSONKey()\n\tuser := reply.GetUsername()\n\tscope := reply.GetOauthScope()\n\tif !strings.Contains(string(jsonKey), user) {\n\t\tgrpclog.Fatalf(\"Got user name %q which is NOT a substring of %q.\", user, jsonKey)\n\t}\n\tif !strings.Contains(*oauthScope, scope) {\n\t\tgrpclog.Fatalf(\"Got OAuth scope %q which is NOT a substring of %q.\", scope, *oauthScope)\n\t}\n\tgrpclog.Println(\"ServiceAccountCreds done\")\n}\n\nvar (\n\ttestMetadata = metadata.MD{\n\t\t\"key1\": \"value1\",\n\t\t\"key2\": \"value2\",\n\t}\n)\n\nfunc doCancelAfterBegin(tc testpb.TestServiceClient) {\n\tctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata))\n\tstream, err := tc.StreamingInputCall(ctx)\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"%v.StreamingInputCall(_) = _, %v\", tc, err)\n\t}\n\tcancel()\n\t_, err = stream.CloseAndRecv()\n\tif grpc.Code(err) != codes.Canceled {\n\t\tgrpclog.Fatalf(\"%v.CloseAndRecv() got error code %d, want %d\", stream, grpc.Code(err), codes.Canceled)\n\t}\n\tgrpclog.Println(\"CancelAfterBegin done\")\n}\n\nfunc doCancelAfterFirstResponse(tc testpb.TestServiceClient) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tstream, err := tc.FullDuplexCall(ctx)\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"%v.FullDuplexCall(_) = _, %v\", tc, err)\n\t}\n\trespParam := []*testpb.ResponseParameters{\n\t\t{\n\t\t\tSize: proto.Int32(31415),\n\t\t},\n\t}\n\tpl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182)\n\treq := &testpb.StreamingOutputCallRequest{\n\t\tResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),\n\t\tResponseParameters: 
respParam,\n\t\tPayload:            pl,\n\t}\n\tif err := stream.Send(req); err != nil {\n\t\tgrpclog.Fatalf(\"%v.Send(%v) = %v\", stream, req, err)\n\t}\n\tif _, err := stream.Recv(); err != nil {\n\t\tgrpclog.Fatalf(\"%v.Recv() = %v\", stream, err)\n\t}\n\tcancel()\n\tif _, err := stream.Recv(); grpc.Code(err) != codes.Canceled {\n\t\tgrpclog.Fatalf(\"%v compleled with error code %d, want %d\", stream, grpc.Code(err), codes.Canceled)\n\t}\n\tgrpclog.Println(\"CancelAfterFirstResponse done\")\n}\n\nfunc main() {\n\tflag.Parse()\n\tserverAddr := net.JoinHostPort(*serverHost, strconv.Itoa(*serverPort))\n\tvar opts []grpc.DialOption\n\tif *useTLS {\n\t\tvar sn string\n\t\tif *tlsServerName != \"\" {\n\t\t\tsn = *tlsServerName\n\t\t}\n\t\tvar creds credentials.TransportAuthenticator\n\t\tif *caFile != \"\" {\n\t\t\tvar err error\n\t\t\tcreds, err = credentials.NewClientTLSFromFile(*caFile, sn)\n\t\t\tif err != nil {\n\t\t\t\tgrpclog.Fatalf(\"Failed to create TLS credentials %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tcreds = credentials.NewClientTLSFromCert(nil, sn)\n\t\t}\n\t\topts = append(opts, grpc.WithTransportCredentials(creds))\n\t\tif *testCase == \"compute_engine_creds\" {\n\t\t\topts = append(opts, grpc.WithPerRPCCredentials(credentials.NewComputeEngine()))\n\t\t} else if *testCase == \"service_account_creds\" {\n\t\t\tjwtCreds, err := credentials.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope)\n\t\t\tif err != nil {\n\t\t\t\tgrpclog.Fatalf(\"Failed to create JWT credentials: %v\", err)\n\t\t\t}\n\t\t\topts = append(opts, grpc.WithPerRPCCredentials(jwtCreds))\n\t\t}\n\t}\n\tconn, err := grpc.Dial(serverAddr, opts...)\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"Fail to dial: %v\", err)\n\t}\n\tdefer conn.Close()\n\ttc := testpb.NewTestServiceClient(conn)\n\tswitch *testCase {\n\tcase \"empty_unary\":\n\t\tdoEmptyUnaryCall(tc)\n\tcase \"large_unary\":\n\t\tdoLargeUnaryCall(tc)\n\tcase \"client_streaming\":\n\t\tdoClientStreaming(tc)\n\tcase 
\"server_streaming\":\n\t\tdoServerStreaming(tc)\n\tcase \"ping_pong\":\n\t\tdoPingPong(tc)\n\tcase \"compute_engine_creds\":\n\t\tif !*useTLS {\n\t\t\tgrpclog.Fatalf(\"TLS is not enabled. TLS is required to execute compute_engine_creds test case.\")\n\t\t}\n\t\tdoComputeEngineCreds(tc)\n\tcase \"service_account_creds\":\n\t\tif !*useTLS {\n\t\t\tgrpclog.Fatalf(\"TLS is not enabled. TLS is required to execute service_account_creds test case.\")\n\t\t}\n\t\tdoServiceAccountCreds(tc)\n\tcase \"cancel_after_begin\":\n\t\tdoCancelAfterBegin(tc)\n\tcase \"cancel_after_first_response\":\n\t\tdoCancelAfterFirstResponse(tc)\n\tdefault:\n\t\tgrpclog.Fatal(\"Unsupported test case: \", *testCase)\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: src/google.golang.org/grpc/test/grpc_testing/test.proto\n// DO NOT EDIT!\n\n/*\nPackage grpc_testing is a generated protocol buffer package.\n\nIt is generated from these files:\n\tsrc/google.golang.org/grpc/test/grpc_testing/test.proto\n\nIt has these top-level messages:\n\tEmpty\n\tPayload\n\tSimpleRequest\n\tSimpleResponse\n\tStreamingInputCallRequest\n\tStreamingInputCallResponse\n\tResponseParameters\n\tStreamingOutputCallRequest\n\tStreamingOutputCallResponse\n*/\npackage grpc_testing\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport math \"math\"\n\nimport (\n\tcontext \"golang.org/x/net/context\"\n\tgrpc \"google.golang.org/grpc\"\n)\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ context.Context\nvar _ grpc.ClientConn\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = math.Inf\n\n// The type of payload that should be returned.\ntype PayloadType int32\n\nconst (\n\t// Compressable text format.\n\tPayloadType_COMPRESSABLE PayloadType = 0\n\t// Uncompressable binary format.\n\tPayloadType_UNCOMPRESSABLE PayloadType = 1\n\t// Randomly chosen from all other formats defined in this enum.\n\tPayloadType_RANDOM PayloadType = 2\n)\n\nvar PayloadType_name = map[int32]string{\n\t0: \"COMPRESSABLE\",\n\t1: \"UNCOMPRESSABLE\",\n\t2: \"RANDOM\",\n}\nvar PayloadType_value = map[string]int32{\n\t\"COMPRESSABLE\":   0,\n\t\"UNCOMPRESSABLE\": 1,\n\t\"RANDOM\":         2,\n}\n\nfunc (x PayloadType) Enum() *PayloadType {\n\tp := new(PayloadType)\n\t*p = x\n\treturn p\n}\nfunc (x PayloadType) String() string {\n\treturn proto.EnumName(PayloadType_name, int32(x))\n}\nfunc (x *PayloadType) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(PayloadType_value, data, \"PayloadType\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = PayloadType(value)\n\treturn nil\n}\n\ntype Empty struct 
{\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Empty) Reset()         { *m = Empty{} }\nfunc (m *Empty) String() string { return proto.CompactTextString(m) }\nfunc (*Empty) ProtoMessage()    {}\n\n// A block of data, to simply increase gRPC message size.\ntype Payload struct {\n\t// The type of data in body.\n\tType *PayloadType `protobuf:\"varint,1,opt,name=type,enum=grpc.testing.PayloadType\" json:\"type,omitempty\"`\n\t// Primary contents of payload.\n\tBody             []byte `protobuf:\"bytes,2,opt,name=body\" json:\"body,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Payload) Reset()         { *m = Payload{} }\nfunc (m *Payload) String() string { return proto.CompactTextString(m) }\nfunc (*Payload) ProtoMessage()    {}\n\nfunc (m *Payload) GetType() PayloadType {\n\tif m != nil && m.Type != nil {\n\t\treturn *m.Type\n\t}\n\treturn PayloadType_COMPRESSABLE\n}\n\nfunc (m *Payload) GetBody() []byte {\n\tif m != nil {\n\t\treturn m.Body\n\t}\n\treturn nil\n}\n\n// Unary request.\ntype SimpleRequest struct {\n\t// Desired payload type in the response from the server.\n\t// If response_type is RANDOM, server randomly chooses one from other formats.\n\tResponseType *PayloadType `protobuf:\"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType\" json:\"response_type,omitempty\"`\n\t// Desired payload size in the response from the server.\n\t// If response_type is COMPRESSABLE, this denotes the size before compression.\n\tResponseSize *int32 `protobuf:\"varint,2,opt,name=response_size\" json:\"response_size,omitempty\"`\n\t// Optional input payload sent along with the request.\n\tPayload *Payload `protobuf:\"bytes,3,opt,name=payload\" json:\"payload,omitempty\"`\n\t// Whether SimpleResponse should include username.\n\tFillUsername *bool `protobuf:\"varint,4,opt,name=fill_username\" json:\"fill_username,omitempty\"`\n\t// Whether SimpleResponse should include OAuth scope.\n\tFillOauthScope   *bool  
`protobuf:\"varint,5,opt,name=fill_oauth_scope\" json:\"fill_oauth_scope,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *SimpleRequest) Reset()         { *m = SimpleRequest{} }\nfunc (m *SimpleRequest) String() string { return proto.CompactTextString(m) }\nfunc (*SimpleRequest) ProtoMessage()    {}\n\nfunc (m *SimpleRequest) GetResponseType() PayloadType {\n\tif m != nil && m.ResponseType != nil {\n\t\treturn *m.ResponseType\n\t}\n\treturn PayloadType_COMPRESSABLE\n}\n\nfunc (m *SimpleRequest) GetResponseSize() int32 {\n\tif m != nil && m.ResponseSize != nil {\n\t\treturn *m.ResponseSize\n\t}\n\treturn 0\n}\n\nfunc (m *SimpleRequest) GetPayload() *Payload {\n\tif m != nil {\n\t\treturn m.Payload\n\t}\n\treturn nil\n}\n\nfunc (m *SimpleRequest) GetFillUsername() bool {\n\tif m != nil && m.FillUsername != nil {\n\t\treturn *m.FillUsername\n\t}\n\treturn false\n}\n\nfunc (m *SimpleRequest) GetFillOauthScope() bool {\n\tif m != nil && m.FillOauthScope != nil {\n\t\treturn *m.FillOauthScope\n\t}\n\treturn false\n}\n\n// Unary response, as configured by the request.\ntype SimpleResponse struct {\n\t// Payload to increase message size.\n\tPayload *Payload `protobuf:\"bytes,1,opt,name=payload\" json:\"payload,omitempty\"`\n\t// The user the request came from, for verifying authentication was\n\t// successful when the client expected it.\n\tUsername *string `protobuf:\"bytes,2,opt,name=username\" json:\"username,omitempty\"`\n\t// OAuth scope.\n\tOauthScope       *string `protobuf:\"bytes,3,opt,name=oauth_scope\" json:\"oauth_scope,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *SimpleResponse) Reset()         { *m = SimpleResponse{} }\nfunc (m *SimpleResponse) String() string { return proto.CompactTextString(m) }\nfunc (*SimpleResponse) ProtoMessage()    {}\n\nfunc (m *SimpleResponse) GetPayload() *Payload {\n\tif m != nil {\n\t\treturn m.Payload\n\t}\n\treturn nil\n}\n\nfunc (m *SimpleResponse) GetUsername() string {\n\tif m != nil 
&& m.Username != nil {\n\t\treturn *m.Username\n\t}\n\treturn \"\"\n}\n\nfunc (m *SimpleResponse) GetOauthScope() string {\n\tif m != nil && m.OauthScope != nil {\n\t\treturn *m.OauthScope\n\t}\n\treturn \"\"\n}\n\n// Client-streaming request.\ntype StreamingInputCallRequest struct {\n\t// Optional input payload sent along with the request.\n\tPayload          *Payload `protobuf:\"bytes,1,opt,name=payload\" json:\"payload,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *StreamingInputCallRequest) Reset()         { *m = StreamingInputCallRequest{} }\nfunc (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) }\nfunc (*StreamingInputCallRequest) ProtoMessage()    {}\n\nfunc (m *StreamingInputCallRequest) GetPayload() *Payload {\n\tif m != nil {\n\t\treturn m.Payload\n\t}\n\treturn nil\n}\n\n// Client-streaming response.\ntype StreamingInputCallResponse struct {\n\t// Aggregated size of payloads received from the client.\n\tAggregatedPayloadSize *int32 `protobuf:\"varint,1,opt,name=aggregated_payload_size\" json:\"aggregated_payload_size,omitempty\"`\n\tXXX_unrecognized      []byte `json:\"-\"`\n}\n\nfunc (m *StreamingInputCallResponse) Reset()         { *m = StreamingInputCallResponse{} }\nfunc (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) }\nfunc (*StreamingInputCallResponse) ProtoMessage()    {}\n\nfunc (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 {\n\tif m != nil && m.AggregatedPayloadSize != nil {\n\t\treturn *m.AggregatedPayloadSize\n\t}\n\treturn 0\n}\n\n// Configuration for a particular response.\ntype ResponseParameters struct {\n\t// Desired payload sizes in responses from the server.\n\t// If response_type is COMPRESSABLE, this denotes the size before compression.\n\tSize *int32 `protobuf:\"varint,1,opt,name=size\" json:\"size,omitempty\"`\n\t// Desired interval between consecutive responses in the response stream in\n\t// microseconds.\n\tIntervalUs  
     *int32 `protobuf:\"varint,2,opt,name=interval_us\" json:\"interval_us,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *ResponseParameters) Reset()         { *m = ResponseParameters{} }\nfunc (m *ResponseParameters) String() string { return proto.CompactTextString(m) }\nfunc (*ResponseParameters) ProtoMessage()    {}\n\nfunc (m *ResponseParameters) GetSize() int32 {\n\tif m != nil && m.Size != nil {\n\t\treturn *m.Size\n\t}\n\treturn 0\n}\n\nfunc (m *ResponseParameters) GetIntervalUs() int32 {\n\tif m != nil && m.IntervalUs != nil {\n\t\treturn *m.IntervalUs\n\t}\n\treturn 0\n}\n\n// Server-streaming request.\ntype StreamingOutputCallRequest struct {\n\t// Desired payload type in the response from the server.\n\t// If response_type is RANDOM, the payload from each response in the stream\n\t// might be of different types. This is to simulate a mixed type of payload\n\t// stream.\n\tResponseType *PayloadType `protobuf:\"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType\" json:\"response_type,omitempty\"`\n\t// Configuration for each expected response message.\n\tResponseParameters []*ResponseParameters `protobuf:\"bytes,2,rep,name=response_parameters\" json:\"response_parameters,omitempty\"`\n\t// Optional input payload sent along with the request.\n\tPayload          *Payload `protobuf:\"bytes,3,opt,name=payload\" json:\"payload,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *StreamingOutputCallRequest) Reset()         { *m = StreamingOutputCallRequest{} }\nfunc (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) }\nfunc (*StreamingOutputCallRequest) ProtoMessage()    {}\n\nfunc (m *StreamingOutputCallRequest) GetResponseType() PayloadType {\n\tif m != nil && m.ResponseType != nil {\n\t\treturn *m.ResponseType\n\t}\n\treturn PayloadType_COMPRESSABLE\n}\n\nfunc (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters {\n\tif m != nil {\n\t\treturn 
m.ResponseParameters\n\t}\n\treturn nil\n}\n\nfunc (m *StreamingOutputCallRequest) GetPayload() *Payload {\n\tif m != nil {\n\t\treturn m.Payload\n\t}\n\treturn nil\n}\n\n// Server-streaming response, as configured by the request and parameters.\ntype StreamingOutputCallResponse struct {\n\t// Payload to increase response size.\n\tPayload          *Payload `protobuf:\"bytes,1,opt,name=payload\" json:\"payload,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *StreamingOutputCallResponse) Reset()         { *m = StreamingOutputCallResponse{} }\nfunc (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) }\nfunc (*StreamingOutputCallResponse) ProtoMessage()    {}\n\nfunc (m *StreamingOutputCallResponse) GetPayload() *Payload {\n\tif m != nil {\n\t\treturn m.Payload\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tproto.RegisterEnum(\"grpc.testing.PayloadType\", PayloadType_name, PayloadType_value)\n}\n\n// Client API for TestService service\n\ntype TestServiceClient interface {\n\t// One empty request followed by one empty response.\n\tEmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)\n\t// One request followed by one response.\n\t// The server returns the client payload as-is.\n\tUnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error)\n\t// One request followed by a sequence of responses (streamed download).\n\t// The server returns the payload with client desired type and sizes.\n\tStreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error)\n\t// A sequence of requests followed by one response (streamed upload).\n\t// The server returns the aggregated size of client payload as the result.\n\tStreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error)\n\t// A sequence of requests with each request served by the 
server immediately.\n\t// As one request could lead to multiple responses, this interface\n\t// demonstrates the idea of full duplexing.\n\tFullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error)\n\t// A sequence of requests followed by a sequence of responses.\n\t// The server buffers all the client requests and then serves them in order. A\n\t// stream of responses are returned to the client when the server starts with\n\t// first request.\n\tHalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error)\n}\n\ntype testServiceClient struct {\n\tcc *grpc.ClientConn\n}\n\nfunc NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient {\n\treturn &testServiceClient{cc}\n}\n\nfunc (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {\n\tout := new(Empty)\n\terr := grpc.Invoke(ctx, \"/grpc.testing.TestService/EmptyCall\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) {\n\tout := new(SimpleResponse)\n\terr := grpc.Invoke(ctx, \"/grpc.testing.TestService/UnaryCall\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, \"/grpc.testing.TestService/StreamingOutputCall\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &testServiceStreamingOutputCallClient{stream}\n\tif err := x.ClientStream.SendMsg(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := x.ClientStream.CloseSend(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn x, nil\n}\n\ntype 
TestService_StreamingOutputCallClient interface {\n\tRecv() (*StreamingOutputCallResponse, error)\n\tgrpc.ClientStream\n}\n\ntype testServiceStreamingOutputCallClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) {\n\tm := new(StreamingOutputCallResponse)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, \"/grpc.testing.TestService/StreamingInputCall\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &testServiceStreamingInputCallClient{stream}\n\treturn x, nil\n}\n\ntype TestService_StreamingInputCallClient interface {\n\tSend(*StreamingInputCallRequest) error\n\tCloseAndRecv() (*StreamingInputCallResponse, error)\n\tgrpc.ClientStream\n}\n\ntype testServiceStreamingInputCallClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error {\n\treturn x.ClientStream.SendMsg(m)\n}\n\nfunc (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) {\n\tif err := x.ClientStream.CloseSend(); err != nil {\n\t\treturn nil, err\n\t}\n\tm := new(StreamingInputCallResponse)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, \"/grpc.testing.TestService/FullDuplexCall\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &testServiceFullDuplexCallClient{stream}\n\treturn x, nil\n}\n\ntype TestService_FullDuplexCallClient interface 
{\n\tSend(*StreamingOutputCallRequest) error\n\tRecv() (*StreamingOutputCallResponse, error)\n\tgrpc.ClientStream\n}\n\ntype testServiceFullDuplexCallClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error {\n\treturn x.ClientStream.SendMsg(m)\n}\n\nfunc (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) {\n\tm := new(StreamingOutputCallResponse)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[3], c.cc, \"/grpc.testing.TestService/HalfDuplexCall\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &testServiceHalfDuplexCallClient{stream}\n\treturn x, nil\n}\n\ntype TestService_HalfDuplexCallClient interface {\n\tSend(*StreamingOutputCallRequest) error\n\tRecv() (*StreamingOutputCallResponse, error)\n\tgrpc.ClientStream\n}\n\ntype testServiceHalfDuplexCallClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error {\n\treturn x.ClientStream.SendMsg(m)\n}\n\nfunc (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) {\n\tm := new(StreamingOutputCallResponse)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n// Server API for TestService service\n\ntype TestServiceServer interface {\n\t// One empty request followed by one empty response.\n\tEmptyCall(context.Context, *Empty) (*Empty, error)\n\t// One request followed by one response.\n\t// The server returns the client payload as-is.\n\tUnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error)\n\t// One request followed by a sequence of responses (streamed download).\n\t// The server returns 
the payload with client desired type and sizes.\n\tStreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error\n\t// A sequence of requests followed by one response (streamed upload).\n\t// The server returns the aggregated size of client payload as the result.\n\tStreamingInputCall(TestService_StreamingInputCallServer) error\n\t// A sequence of requests with each request served by the server immediately.\n\t// As one request could lead to multiple responses, this interface\n\t// demonstrates the idea of full duplexing.\n\tFullDuplexCall(TestService_FullDuplexCallServer) error\n\t// A sequence of requests followed by a sequence of responses.\n\t// The server buffers all the client requests and then serves them in order. A\n\t// stream of responses are returned to the client when the server starts with\n\t// first request.\n\tHalfDuplexCall(TestService_HalfDuplexCallServer) error\n}\n\nfunc RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) {\n\ts.RegisterService(&_TestService_serviceDesc, srv)\n}\n\nfunc _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(Empty)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(TestServiceServer).EmptyCall(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(SimpleRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(TestServiceServer).UnaryCall(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error {\n\tm := new(StreamingOutputCallRequest)\n\tif err := stream.RecvMsg(m); err != nil {\n\t\treturn err\n\t}\n\treturn 
srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream})\n}\n\ntype TestService_StreamingOutputCallServer interface {\n\tSend(*StreamingOutputCallResponse) error\n\tgrpc.ServerStream\n}\n\ntype testServiceStreamingOutputCallServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error {\n\treturn srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream})\n}\n\ntype TestService_StreamingInputCallServer interface {\n\tSendAndClose(*StreamingInputCallResponse) error\n\tRecv() (*StreamingInputCallRequest, error)\n\tgrpc.ServerStream\n}\n\ntype testServiceStreamingInputCallServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) {\n\tm := new(StreamingInputCallRequest)\n\tif err := x.ServerStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error {\n\treturn srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream})\n}\n\ntype TestService_FullDuplexCallServer interface {\n\tSend(*StreamingOutputCallResponse) error\n\tRecv() (*StreamingOutputCallRequest, error)\n\tgrpc.ServerStream\n}\n\ntype testServiceFullDuplexCallServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) {\n\tm := new(StreamingOutputCallRequest)\n\tif err := x.ServerStream.RecvMsg(m); err != 
nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error {\n\treturn srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream})\n}\n\ntype TestService_HalfDuplexCallServer interface {\n\tSend(*StreamingOutputCallResponse) error\n\tRecv() (*StreamingOutputCallRequest, error)\n\tgrpc.ServerStream\n}\n\ntype testServiceHalfDuplexCallServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) {\n\tm := new(StreamingOutputCallRequest)\n\tif err := x.ServerStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nvar _TestService_serviceDesc = grpc.ServiceDesc{\n\tServiceName: \"grpc.testing.TestService\",\n\tHandlerType: (*TestServiceServer)(nil),\n\tMethods: []grpc.MethodDesc{\n\t\t{\n\t\t\tMethodName: \"EmptyCall\",\n\t\t\tHandler:    _TestService_EmptyCall_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"UnaryCall\",\n\t\t\tHandler:    _TestService_UnaryCall_Handler,\n\t\t},\n\t},\n\tStreams: []grpc.StreamDesc{\n\t\t{\n\t\t\tStreamName:    \"StreamingOutputCall\",\n\t\t\tHandler:       _TestService_StreamingOutputCall_Handler,\n\t\t\tServerStreams: true,\n\t\t},\n\t\t{\n\t\t\tStreamName:    \"StreamingInputCall\",\n\t\t\tHandler:       _TestService_StreamingInputCall_Handler,\n\t\t\tClientStreams: true,\n\t\t},\n\t\t{\n\t\t\tStreamName:    \"FullDuplexCall\",\n\t\t\tHandler:       _TestService_FullDuplexCall_Handler,\n\t\t\tServerStreams: true,\n\t\t\tClientStreams: true,\n\t\t},\n\t\t{\n\t\t\tStreamName:    \"HalfDuplexCall\",\n\t\t\tHandler:       _TestService_HalfDuplexCall_Handler,\n\t\t\tServerStreams: true,\n\t\t\tClientStreams: true,\n\t\t},\n\t},\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/interop/grpc_testing/test.proto",
    "content": "// An integration test service that covers all the method signature permutations\n// of unary/streaming requests/responses.\nsyntax = \"proto2\";\n\npackage grpc.testing;\n\nmessage Empty {}\n\n// The type of payload that should be returned.\nenum PayloadType {\n  // Compressable text format.\n  COMPRESSABLE = 0;\n\n  // Uncompressable binary format.\n  UNCOMPRESSABLE = 1;\n\n  // Randomly chosen from all other formats defined in this enum.\n  RANDOM = 2;\n}\n\n// A block of data, to simply increase gRPC message size.\nmessage Payload {\n  // The type of data in body.\n  optional PayloadType type = 1;\n  // Primary contents of payload.\n  optional bytes body = 2;\n}\n\n// Unary request.\nmessage SimpleRequest {\n  // Desired payload type in the response from the server.\n  // If response_type is RANDOM, server randomly chooses one from other formats.\n  optional PayloadType response_type = 1;\n\n  // Desired payload size in the response from the server.\n  // If response_type is COMPRESSABLE, this denotes the size before compression.\n  optional int32 response_size = 2;\n\n  // Optional input payload sent along with the request.\n  optional Payload payload = 3;\n\n  // Whether SimpleResponse should include username.\n  optional bool fill_username = 4;\n\n  // Whether SimpleResponse should include OAuth scope.\n  optional bool fill_oauth_scope = 5;\n}\n\n// Unary response, as configured by the request.\nmessage SimpleResponse {\n  // Payload to increase message size.\n  optional Payload payload = 1;\n\n  // The user the request came from, for verifying authentication was\n  // successful when the client expected it.\n  optional string username = 2;\n  \n  // OAuth scope.\n  optional string oauth_scope = 3;\n}\n\n// Client-streaming request.\nmessage StreamingInputCallRequest {\n  // Optional input payload sent along with the request.\n  optional Payload payload = 1;\n\n  // Not expecting any payload from the response.\n}\n\n// Client-streaming 
response.\nmessage StreamingInputCallResponse {\n  // Aggregated size of payloads received from the client.\n  optional int32 aggregated_payload_size = 1;\n}\n\n// Configuration for a particular response.\nmessage ResponseParameters {\n  // Desired payload sizes in responses from the server.\n  // If response_type is COMPRESSABLE, this denotes the size before compression.\n  optional int32 size = 1;\n\n  // Desired interval between consecutive responses in the response stream in\n  // microseconds.\n  optional int32 interval_us = 2;\n}\n\n// Server-streaming request.\nmessage StreamingOutputCallRequest {\n  // Desired payload type in the response from the server.\n  // If response_type is RANDOM, the payload from each response in the stream\n  // might be of different types. This is to simulate a mixed type of payload\n  // stream.\n  optional PayloadType response_type = 1;\n\n  // Configuration for each expected response message.\n  repeated ResponseParameters response_parameters = 2;\n\n  // Optional input payload sent along with the request.\n  optional Payload payload = 3;\n}\n\n// Server-streaming response, as configured by the request and parameters.\nmessage StreamingOutputCallResponse {\n  // Payload to increase response size.\n  optional Payload payload = 1;\n}\n\n// A simple service to test the various types of RPCs and experiment with\n// performance with various types of payload.\nservice TestService {\n  // One empty request followed by one empty response.\n  rpc EmptyCall(Empty) returns (Empty);\n\n  // One request followed by one response.\n  // The server returns the client payload as-is.\n  rpc UnaryCall(SimpleRequest) returns (SimpleResponse);\n\n  // One request followed by a sequence of responses (streamed download).\n  // The server returns the payload with client desired type and sizes.\n  rpc StreamingOutputCall(StreamingOutputCallRequest)\n      returns (stream StreamingOutputCallResponse);\n\n  // A sequence of requests followed by one 
response (streamed upload).\n  // The server returns the aggregated size of client payload as the result.\n  rpc StreamingInputCall(stream StreamingInputCallRequest)\n      returns (StreamingInputCallResponse);\n\n  // A sequence of requests with each request served by the server immediately.\n  // As one request could lead to multiple responses, this interface\n  // demonstrates the idea of full duplexing.\n  rpc FullDuplexCall(stream StreamingOutputCallRequest)\n      returns (stream StreamingOutputCallResponse);\n\n  // A sequence of requests followed by a sequence of responses.\n  // The server buffers all the client requests and then serves them in order. A\n  // stream of responses are returned to the client when the server starts with\n  // first request.\n  rpc HalfDuplexCall(stream StreamingOutputCallRequest)\n      returns (stream StreamingOutputCallResponse);\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/interop/server/server.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/grpc/grpclog\"\n\ttestpb \"google.golang.org/grpc/interop/grpc_testing\"\n)\n\nvar (\n\tuseTLS   = flag.Bool(\"use_tls\", false, \"Connection uses TLS if true, else plain TCP\")\n\tcertFile = flag.String(\"tls_cert_file\", \"testdata/server1.pem\", \"The TLS cert file\")\n\tkeyFile  = flag.String(\"tls_key_file\", \"testdata/server1.key\", \"The TLS key file\")\n\tport     = flag.Int(\"port\", 10000, \"The server port\")\n)\n\ntype testServer struct {\n}\n\nfunc (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {\n\treturn new(testpb.Empty), nil\n}\n\nfunc newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) {\n\tif size < 0 {\n\t\treturn nil, fmt.Errorf(\"requested a response with invalid length %d\", size)\n\t}\n\tbody := make([]byte, size)\n\tswitch t {\n\tcase testpb.PayloadType_COMPRESSABLE:\n\tcase testpb.PayloadType_UNCOMPRESSABLE:\n\t\treturn nil, fmt.Errorf(\"payloadType UNCOMPRESSABLE is not supported\")\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported payload type: %d\", t)\n\t}\n\treturn &testpb.Payload{\n\t\tType: t.Enum(),\n\t\tBody: body,\n\t}, nil\n}\n\nfunc (s *testServer) UnaryCall(ctx 
context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {\n\tpl, err := newPayload(in.GetResponseType(), in.GetResponseSize())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &testpb.SimpleResponse{\n\t\tPayload: pl,\n\t}, nil\n}\n\nfunc (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error {\n\tcs := args.GetResponseParameters()\n\tfor _, c := range cs {\n\t\tif us := c.GetIntervalUs(); us > 0 {\n\t\t\ttime.Sleep(time.Duration(us) * time.Microsecond)\n\t\t}\n\t\tpl, err := newPayload(args.GetResponseType(), c.GetSize())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := stream.Send(&testpb.StreamingOutputCallResponse{\n\t\t\tPayload: pl,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error {\n\tvar sum int\n\tfor {\n\t\tin, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\treturn stream.SendAndClose(&testpb.StreamingInputCallResponse{\n\t\t\t\tAggregatedPayloadSize: proto.Int32(int32(sum)),\n\t\t\t})\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp := in.GetPayload().GetBody()\n\t\tsum += len(p)\n\t}\n}\n\nfunc (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {\n\tfor {\n\t\tin, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\t// read done.\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcs := in.GetResponseParameters()\n\t\tfor _, c := range cs {\n\t\t\tif us := c.GetIntervalUs(); us > 0 {\n\t\t\t\ttime.Sleep(time.Duration(us) * time.Microsecond)\n\t\t\t}\n\t\t\tpl, err := newPayload(in.GetResponseType(), c.GetSize())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := stream.Send(&testpb.StreamingOutputCallResponse{\n\t\t\t\tPayload: pl,\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *testServer) 
HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error {\n\tvar msgBuf []*testpb.StreamingOutputCallRequest\n\tfor {\n\t\tin, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\t// read done.\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmsgBuf = append(msgBuf, in)\n\t}\n\tfor _, m := range msgBuf {\n\t\tcs := m.GetResponseParameters()\n\t\tfor _, c := range cs {\n\t\t\tif us := c.GetIntervalUs(); us > 0 {\n\t\t\t\ttime.Sleep(time.Duration(us) * time.Microsecond)\n\t\t\t}\n\t\t\tpl, err := newPayload(m.GetResponseType(), c.GetSize())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := stream.Send(&testpb.StreamingOutputCallResponse{\n\t\t\t\tPayload: pl,\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tflag.Parse()\n\tp := strconv.Itoa(*port)\n\tlis, err := net.Listen(\"tcp\", \":\"+p)\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\tvar opts []grpc.ServerOption\n\tif *useTLS {\n\t\tcreds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile)\n\t\tif err != nil {\n\t\t\tgrpclog.Fatalf(\"Failed to generate credentials %v\", err)\n\t\t}\n\t\topts = []grpc.ServerOption{grpc.Creds(creds)}\n\t}\n\tserver := grpc.NewServer(opts...)\n\ttestpb.RegisterTestServiceServer(server, &testServer{})\n\tserver.Serve(lis)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/metadata/metadata.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n// Package metadata define the structure of the metadata supported by gRPC library.\npackage metadata\n\nimport (\n\t\"encoding/base64\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"golang.org/x/net/context\"\n)\n\nconst (\n\tbinHdrSuffix = \"-bin\"\n)\n\n// grpc-http2 requires ASCII header key and value (more detail can be found in\n// \"Requests\" subsection in go/grpc-http2).\nfunc isASCII(s string) bool {\n\tfor _, c := range s {\n\t\tif c > 127 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// encodeKeyValue encodes key and value qualified for transmission via gRPC.\n// Transmitting binary headers violates HTTP/2 spec.\n// TODO(zhaoq): Maybe check if k is ASCII also.\nfunc encodeKeyValue(k, v string) (string, string) {\n\tif isASCII(v) {\n\t\treturn k, v\n\t}\n\tkey := k + binHdrSuffix\n\tval := base64.StdEncoding.EncodeToString([]byte(v))\n\treturn key, string(val)\n}\n\n// DecodeKeyValue returns the original key and value corresponding to the\n// encoded data in k, v.\nfunc DecodeKeyValue(k, v string) (string, string, error) {\n\tif !strings.HasSuffix(k, binHdrSuffix) {\n\t\treturn k, v, nil\n\t}\n\tkey := k[:len(k)-len(binHdrSuffix)]\n\tval, err := base64.StdEncoding.DecodeString(v)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\treturn key, string(val), nil\n}\n\n// MD is a mapping from metadata keys to values. 
Users should use the following\n// two convenience functions New and Pairs to generate MD.\ntype MD map[string]string\n\n// New creates a MD from given key-value map.\nfunc New(m map[string]string) MD {\n\tmd := MD{}\n\tfor k, v := range m {\n\t\tkey, val := encodeKeyValue(k, v)\n\t\tmd[key] = val\n\t}\n\treturn md\n}\n\n// Pairs returns an MD formed by the mapping of key, value ...\n// Pairs panics if len(kv) is odd.\nfunc Pairs(kv ...string) MD {\n\tif len(kv)%2 == 1 {\n\t\tpanic(fmt.Sprintf(\"metadata: Pairs got the odd number of input pairs for metadata: %d\", len(kv)))\n\t}\n\tmd := MD{}\n\tvar k string\n\tfor i, s := range kv {\n\t\tif i%2 == 0 {\n\t\t\tk = s\n\t\t\tcontinue\n\t\t}\n\t\tkey, val := encodeKeyValue(k, s)\n\t\tmd[key] = val\n\t}\n\treturn md\n}\n\n// Len returns the number of items in md.\nfunc (md MD) Len() int {\n\treturn len(md)\n}\n\n// Copy returns a copy of md.\nfunc (md MD) Copy() MD {\n\tout := MD{}\n\tfor k, v := range md {\n\t\tout[k] = v\n\t}\n\treturn out\n}\n\ntype mdKey struct{}\n\n// NewContext creates a new context with md attached.\nfunc NewContext(ctx context.Context, md MD) context.Context {\n\treturn context.WithValue(ctx, mdKey{}, md)\n}\n\n// FromContext returns the MD in ctx if it exists.\nfunc FromContext(ctx context.Context) (md MD, ok bool) {\n\tmd, ok = ctx.Value(mdKey{}).(MD)\n\treturn\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/rpc_util.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\npackage grpc\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"math/rand\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/metadata\"\n\t\"google.golang.org/grpc/transport\"\n)\n\n// Codec defines the interface gRPC uses to encode and decode messages.\ntype Codec interface {\n\t// Marshal returns the wire format of v.\n\tMarshal(v interface{}) ([]byte, error)\n\t// Unmarshal parses the wire format into v.\n\tUnmarshal(data []byte, v interface{}) error\n\t// String returns the name of the Codec implementation. The returned\n\t// string will be used as part of content type in transmission.\n\tString() string\n}\n\n// protoCodec is a Codec implemetation with protobuf. It is the default codec for gRPC.\ntype protoCodec struct{}\n\nfunc (protoCodec) Marshal(v interface{}) ([]byte, error) {\n\treturn proto.Marshal(v.(proto.Message))\n}\n\nfunc (protoCodec) Unmarshal(data []byte, v interface{}) error {\n\treturn proto.Unmarshal(data, v.(proto.Message))\n}\n\nfunc (protoCodec) String() string {\n\treturn \"proto\"\n}\n\n// CallOption configures a Call before it starts or extracts information from\n// a Call after it completes.\ntype CallOption interface {\n\t// before is called before the call is sent to any server.  
If before\n\t// returns a non-nil error, the RPC fails with that error.\n\tbefore(*callInfo) error\n\n\t// after is called after the call has completed.  after cannot return an\n\t// error, so any failures should be reported via output parameters.\n\tafter(*callInfo)\n}\n\ntype beforeCall func(c *callInfo) error\n\nfunc (o beforeCall) before(c *callInfo) error { return o(c) }\nfunc (o beforeCall) after(c *callInfo)        {}\n\ntype afterCall func(c *callInfo)\n\nfunc (o afterCall) before(c *callInfo) error { return nil }\nfunc (o afterCall) after(c *callInfo)        { o(c) }\n\n// Header returns a CallOptions that retrieves the header metadata\n// for a unary RPC.\nfunc Header(md *metadata.MD) CallOption {\n\treturn afterCall(func(c *callInfo) {\n\t\t*md = c.headerMD\n\t})\n}\n\n// Trailer returns a CallOptions that retrieves the trailer metadata\n// for a unary RPC.\nfunc Trailer(md *metadata.MD) CallOption {\n\treturn afterCall(func(c *callInfo) {\n\t\t*md = c.trailerMD\n\t})\n}\n\n// The format of the payload: compressed or not?\ntype payloadFormat uint8\n\nconst (\n\tcompressionNone payloadFormat = iota // no compression\n\tcompressionFlate\n\t// More formats\n)\n\n// parser reads complelete gRPC messages from the underlying reader.\ntype parser struct {\n\ts io.Reader\n}\n\n// msgFixedHeader defines the header of a gRPC message (go/grpc-wirefmt).\ntype msgFixedHeader struct {\n\tT      payloadFormat\n\tLength uint32\n}\n\n// recvMsg is to read a complete gRPC message from the stream. It is blocking if\n// the message has not been complete yet. It returns the message and its type,\n// EOF is returned with nil msg and 0 pf if the entire stream is done. 
Other\n// non-nil error is returned if something is wrong on reading.\nfunc (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) {\n\tvar hdr msgFixedHeader\n\tif err := binary.Read(p.s, binary.BigEndian, &hdr); err != nil {\n\t\treturn 0, nil, err\n\t}\n\tif hdr.Length == 0 {\n\t\treturn hdr.T, nil, nil\n\t}\n\tmsg = make([]byte, int(hdr.Length))\n\tif _, err := io.ReadFull(p.s, msg); err != nil {\n\t\tif err == io.EOF {\n\t\t\terr = io.ErrUnexpectedEOF\n\t\t}\n\t\treturn 0, nil, err\n\t}\n\treturn hdr.T, msg, nil\n}\n\n// encode serializes msg and prepends the message header. If msg is nil, it\n// generates the message header of 0 message length.\nfunc encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) {\n\tvar buf bytes.Buffer\n\t// Write message fixed header.\n\tbuf.WriteByte(uint8(pf))\n\tvar b []byte\n\tvar length uint32\n\tif msg != nil {\n\t\tvar err error\n\t\t// TODO(zhaoq): optimize to reduce memory alloc and copying.\n\t\tb, err = c.Marshal(msg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlength = uint32(len(b))\n\t}\n\tvar szHdr [4]byte\n\tbinary.BigEndian.PutUint32(szHdr[:], length)\n\tbuf.Write(szHdr[:])\n\tbuf.Write(b)\n\treturn buf.Bytes(), nil\n}\n\nfunc recv(p *parser, c Codec, m interface{}) error {\n\tpf, d, err := p.recvMsg()\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch pf {\n\tcase compressionNone:\n\t\tif err := c.Unmarshal(d, m); err != nil {\n\t\t\treturn Errorf(codes.Internal, \"grpc: %v\", err)\n\t\t}\n\tdefault:\n\t\treturn Errorf(codes.Internal, \"gprc: compression is not supported yet.\")\n\t}\n\treturn nil\n}\n\n// rpcError defines the status from an RPC.\ntype rpcError struct {\n\tcode codes.Code\n\tdesc string\n}\n\nfunc (e rpcError) Error() string {\n\treturn fmt.Sprintf(\"rpc error: code = %d desc = %q\", e.code, e.desc)\n}\n\n// Code returns the error code for err if it was produced by the rpc system.\n// Otherwise, it returns codes.Unknown.\nfunc Code(err error) codes.Code {\n\tif err 
== nil {\n\t\treturn codes.OK\n\t}\n\tif e, ok := err.(rpcError); ok {\n\t\treturn e.code\n\t}\n\treturn codes.Unknown\n}\n\n// Errorf returns an error containing an error code and a description;\n// Errorf returns nil if c is OK.\nfunc Errorf(c codes.Code, format string, a ...interface{}) error {\n\tif c == codes.OK {\n\t\treturn nil\n\t}\n\treturn rpcError{\n\t\tcode: c,\n\t\tdesc: fmt.Sprintf(format, a...),\n\t}\n}\n\n// toRPCErr converts an error into a rpcError.\nfunc toRPCErr(err error) error {\n\tswitch e := err.(type) {\n\tcase transport.StreamError:\n\t\treturn rpcError{\n\t\t\tcode: e.Code,\n\t\t\tdesc: e.Desc,\n\t\t}\n\tcase transport.ConnectionError:\n\t\treturn rpcError{\n\t\t\tcode: codes.Internal,\n\t\t\tdesc: e.Desc,\n\t\t}\n\t}\n\treturn Errorf(codes.Unknown, \"%v\", err)\n}\n\n// convertCode converts a standard Go error into its canonical code. Note that\n// this is only used to translate the error returned by the server applications.\nfunc convertCode(err error) codes.Code {\n\tswitch err {\n\tcase nil:\n\t\treturn codes.OK\n\tcase io.EOF:\n\t\treturn codes.OutOfRange\n\tcase io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:\n\t\treturn codes.FailedPrecondition\n\tcase os.ErrInvalid:\n\t\treturn codes.InvalidArgument\n\tcase context.Canceled:\n\t\treturn codes.Canceled\n\tcase context.DeadlineExceeded:\n\t\treturn codes.DeadlineExceeded\n\t}\n\tswitch {\n\tcase os.IsExist(err):\n\t\treturn codes.AlreadyExists\n\tcase os.IsNotExist(err):\n\t\treturn codes.NotFound\n\tcase os.IsPermission(err):\n\t\treturn codes.PermissionDenied\n\t}\n\treturn codes.Unknown\n}\n\nconst (\n\t// how long to wait after the first failure before retrying\n\tbaseDelay = 1.0 * time.Second\n\t// upper bound on backoff delay\n\tmaxDelay      = 120 * time.Second\n\tbackoffFactor = 2.0 // backoff increases by this factor on each retry\n\tbackoffRange  = 0.4 // backoff is randomized downwards by this factor\n)\n\n// backoff returns a 
value in [0, maxDelay] that increases exponentially with\n// retries, starting from baseDelay.\nfunc backoff(retries int) time.Duration {\n\tbackoff, max := float64(baseDelay), float64(maxDelay)\n\tfor backoff < max && retries > 0 {\n\t\tbackoff = backoff * backoffFactor\n\t\tretries--\n\t}\n\tif backoff > max {\n\t\tbackoff = max\n\t}\n\n\t// Randomize backoff delays so that if a cluster of requests start at\n\t// the same time, they won't operate in lockstep.  We just subtract up\n\t// to 40% so that we obey maxDelay.\n\tbackoff -= backoff * backoffRange * rand.Float64()\n\tif backoff < 0 {\n\t\treturn 0\n\t}\n\treturn time.Duration(backoff)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/server.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\npackage grpc\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/grpc/grpclog\"\n\t\"google.golang.org/grpc/metadata\"\n\t\"google.golang.org/grpc/transport\"\n)\n\ntype methodHandler func(srv interface{}, ctx context.Context, codec Codec, buf []byte) (interface{}, error)\n\n// MethodDesc represents an RPC service's method specification.\ntype MethodDesc struct {\n\tMethodName string\n\tHandler    methodHandler\n}\n\n// ServiceDesc represents an RPC service's specification.\ntype ServiceDesc struct {\n\tServiceName string\n\t// The pointer to the service interface. 
Used to check whether the user\n\t// provided implementation satisfies the interface requirements.\n\tHandlerType interface{}\n\tMethods     []MethodDesc\n\tStreams     []StreamDesc\n}\n\n// service consists of the information of the server serving this service and\n// the methods in this service.\ntype service struct {\n\tserver interface{} // the server for service methods\n\tmd     map[string]*MethodDesc\n\tsd     map[string]*StreamDesc\n}\n\n// Server is a gRPC server to serve RPC requests.\ntype Server struct {\n\topts  options\n\tmu    sync.Mutex\n\tlis   map[net.Listener]bool\n\tconns map[transport.ServerTransport]bool\n\tm     map[string]*service // service name -> service info\n}\n\ntype options struct {\n\tcreds                credentials.Credentials\n\tcodec                Codec\n\tmaxConcurrentStreams uint32\n}\n\n// A ServerOption sets options.\ntype ServerOption func(*options)\n\n// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.\nfunc CustomCodec(codec Codec) ServerOption {\n\treturn func(o *options) {\n\t\to.codec = codec\n\t}\n}\n\n// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number\n// of concurrent streams to each ServerTransport.\nfunc MaxConcurrentStreams(n uint32) ServerOption {\n\treturn func(o *options) {\n\t\to.maxConcurrentStreams = n\n\t}\n}\n\n// Creds returns a ServerOption that sets credentials for server connections.\nfunc Creds(c credentials.Credentials) ServerOption {\n\treturn func(o *options) {\n\t\to.creds = c\n\t}\n}\n\n// NewServer creates a gRPC server which has no service registered and has not\n// started to accept requests yet.\nfunc NewServer(opt ...ServerOption) *Server {\n\tvar opts options\n\tfor _, o := range opt {\n\t\to(&opts)\n\t}\n\tif opts.codec == nil {\n\t\t// Set the default codec.\n\t\topts.codec = protoCodec{}\n\t}\n\treturn &Server{\n\t\tlis:   make(map[net.Listener]bool),\n\t\topts:  opts,\n\t\tconns: 
make(map[transport.ServerTransport]bool),\n\t\tm:     make(map[string]*service),\n\t}\n}\n\n// RegisterService register a service and its implementation to the gRPC\n// server. Called from the IDL generated code. This must be called before\n// invoking Serve.\nfunc (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\t// Does some sanity checks.\n\tif _, ok := s.m[sd.ServiceName]; ok {\n\t\tgrpclog.Fatalf(\"grpc: Server.RegisterService found duplicate service registration for %q\", sd.ServiceName)\n\t}\n\tht := reflect.TypeOf(sd.HandlerType).Elem()\n\tst := reflect.TypeOf(ss)\n\tif !st.Implements(ht) {\n\t\tgrpclog.Fatalf(\"grpc: Server.RegisterService found the handler of type %v that does not satisfy %v\", st, ht)\n\t}\n\tsrv := &service{\n\t\tserver: ss,\n\t\tmd:     make(map[string]*MethodDesc),\n\t\tsd:     make(map[string]*StreamDesc),\n\t}\n\tfor i := range sd.Methods {\n\t\td := &sd.Methods[i]\n\t\tsrv.md[d.MethodName] = d\n\t}\n\tfor i := range sd.Streams {\n\t\td := &sd.Streams[i]\n\t\tsrv.sd[d.StreamName] = d\n\t}\n\ts.m[sd.ServiceName] = srv\n}\n\nvar (\n\t// ErrServerStopped indicates that the operation is now illegal because of\n\t// the server being stopped.\n\tErrServerStopped = errors.New(\"grpc: the server has been stopped\")\n)\n\n// Serve accepts incoming connections on the listener lis, creating a new\n// ServerTransport and service goroutine for each. 
The service goroutines\n// read gRPC request and then call the registered handlers to reply to them.\n// Service returns when lis.Accept fails.\nfunc (s *Server) Serve(lis net.Listener) error {\n\ts.mu.Lock()\n\tif s.lis == nil {\n\t\ts.mu.Unlock()\n\t\treturn ErrServerStopped\n\t}\n\ts.lis[lis] = true\n\ts.mu.Unlock()\n\tdefer func() {\n\t\tlis.Close()\n\t\ts.mu.Lock()\n\t\tdelete(s.lis, lis)\n\t\ts.mu.Unlock()\n\t}()\n\tfor {\n\t\tc, err := lis.Accept()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok {\n\t\t\tc, err = creds.ServerHandshake(c)\n\t\t\tif err != nil {\n\t\t\t\tgrpclog.Println(\"grpc: Server.Serve failed to complete security handshake.\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\ts.mu.Lock()\n\t\tif s.conns == nil {\n\t\t\ts.mu.Unlock()\n\t\t\tc.Close()\n\t\t\treturn nil\n\t\t}\n\t\tst, err := transport.NewServerTransport(\"http2\", c, s.opts.maxConcurrentStreams)\n\t\tif err != nil {\n\t\t\ts.mu.Unlock()\n\t\t\tc.Close()\n\t\t\tgrpclog.Println(\"grpc: Server.Serve failed to create ServerTransport: \", err)\n\t\t\tcontinue\n\t\t}\n\t\ts.conns[st] = true\n\t\ts.mu.Unlock()\n\n\t\tgo func() {\n\t\t\tst.HandleStreams(func(stream *transport.Stream) {\n\t\t\t\ts.handleStream(st, stream)\n\t\t\t})\n\t\t\ts.mu.Lock()\n\t\t\tdelete(s.conns, st)\n\t\t\ts.mu.Unlock()\n\t\t}()\n\t}\n}\n\nfunc (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, pf payloadFormat, opts *transport.Options) error {\n\tp, err := encode(s.opts.codec, msg, pf)\n\tif err != nil {\n\t\t// This typically indicates a fatal issue (e.g., memory\n\t\t// corruption or hardware faults) the application program\n\t\t// cannot handle.\n\t\t//\n\t\t// TODO(zhaoq): There exist other options also such as only closing the\n\t\t// faulty stream locally and remotely (Other streams can keep going). 
Find\n\t\t// the optimal option.\n\t\tgrpclog.Fatalf(\"grpc: Server failed to encode response %v\", err)\n\t}\n\treturn t.Write(stream, p, opts)\n}\n\nfunc (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc) {\n\tp := &parser{s: stream}\n\tfor {\n\t\tpf, req, err := p.recvMsg()\n\t\tif err == io.EOF {\n\t\t\t// The entire stream is done (for unary RPC only).\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\tswitch err := err.(type) {\n\t\t\tcase transport.ConnectionError:\n\t\t\t\t// Nothing to do here.\n\t\t\tcase transport.StreamError:\n\t\t\t\tif err := t.WriteStatus(stream, err.Code, err.Desc); err != nil {\n\t\t\t\t\tgrpclog.Printf(\"grpc: Server.processUnaryRPC failed to write status: %v\", err)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"grpc: Unexpected error (%T) from recvMsg: %v\", err, err))\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tswitch pf {\n\t\tcase compressionNone:\n\t\t\tstatusCode := codes.OK\n\t\t\tstatusDesc := \"\"\n\t\t\treply, appErr := md.Handler(srv.server, stream.Context(), s.opts.codec, req)\n\t\t\tif appErr != nil {\n\t\t\t\tif err, ok := appErr.(rpcError); ok {\n\t\t\t\t\tstatusCode = err.code\n\t\t\t\t\tstatusDesc = err.desc\n\t\t\t\t} else {\n\t\t\t\t\tstatusCode = convertCode(appErr)\n\t\t\t\t\tstatusDesc = appErr.Error()\n\t\t\t\t}\n\t\t\t\tif err := t.WriteStatus(stream, statusCode, statusDesc); err != nil {\n\t\t\t\t\tgrpclog.Printf(\"grpc: Server.processUnaryRPC failed to write status: %v\", err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\topts := &transport.Options{\n\t\t\t\tLast:  true,\n\t\t\t\tDelay: false,\n\t\t\t}\n\t\t\tif err := s.sendResponse(t, stream, reply, compressionNone, opts); err != nil {\n\t\t\t\tif _, ok := err.(transport.ConnectionError); ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif e, ok := err.(transport.StreamError); ok {\n\t\t\t\t\tstatusCode = e.Code\n\t\t\t\t\tstatusDesc = e.Desc\n\t\t\t\t} else {\n\t\t\t\t\tstatusCode = 
codes.Unknown\n\t\t\t\t\tstatusDesc = err.Error()\n\t\t\t\t}\n\t\t\t}\n\t\t\tt.WriteStatus(stream, statusCode, statusDesc)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"payload format to be supported: %d\", pf))\n\t\t}\n\t}\n}\n\nfunc (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc) {\n\tss := &serverStream{\n\t\tt:     t,\n\t\ts:     stream,\n\t\tp:     &parser{s: stream},\n\t\tcodec: s.opts.codec,\n\t}\n\tif appErr := sd.Handler(srv.server, ss); appErr != nil {\n\t\tif err, ok := appErr.(rpcError); ok {\n\t\t\tss.statusCode = err.code\n\t\t\tss.statusDesc = err.desc\n\t\t} else {\n\t\t\tss.statusCode = convertCode(appErr)\n\t\t\tss.statusDesc = appErr.Error()\n\t\t}\n\t}\n\tt.WriteStatus(ss.s, ss.statusCode, ss.statusDesc)\n}\n\nfunc (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {\n\tsm := stream.Method()\n\tif sm != \"\" && sm[0] == '/' {\n\t\tsm = sm[1:]\n\t}\n\tpos := strings.LastIndex(sm, \"/\")\n\tif pos == -1 {\n\t\tif err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf(\"malformed method name: %q\", stream.Method())); err != nil {\n\t\t\tgrpclog.Printf(\"grpc: Server.handleStream failed to write status: %v\", err)\n\t\t}\n\t\treturn\n\t}\n\tservice := sm[:pos]\n\tmethod := sm[pos+1:]\n\tsrv, ok := s.m[service]\n\tif !ok {\n\t\tif err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf(\"unknown service %v\", service)); err != nil {\n\t\t\tgrpclog.Printf(\"grpc: Server.handleStream failed to write status: %v\", err)\n\t\t}\n\t\treturn\n\t}\n\t// Unary RPC or Streaming RPC?\n\tif md, ok := srv.md[method]; ok {\n\t\ts.processUnaryRPC(t, stream, srv, md)\n\t\treturn\n\t}\n\tif sd, ok := srv.sd[method]; ok {\n\t\ts.processStreamingRPC(t, stream, srv, sd)\n\t\treturn\n\t}\n\tif err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf(\"unknown method %v\", method)); err != nil {\n\t\tgrpclog.Printf(\"grpc: Server.handleStream failed to write 
status: %v\", err)\n\t}\n}\n\n// Stop stops the gRPC server. Once Stop returns, the server stops accepting\n// connection requests and closes all the connected connections.\nfunc (s *Server) Stop() {\n\ts.mu.Lock()\n\tlisteners := s.lis\n\ts.lis = nil\n\tcs := s.conns\n\ts.conns = nil\n\ts.mu.Unlock()\n\tfor lis := range listeners {\n\t\tlis.Close()\n\t}\n\tfor c := range cs {\n\t\tc.Close()\n\t}\n}\n\n// TestingCloseConns closes all exiting transports but keeps s.lis accepting new\n// connections. This is for test only now.\nfunc (s *Server) TestingCloseConns() {\n\ts.mu.Lock()\n\tfor c := range s.conns {\n\t\tc.Close()\n\t}\n\ts.conns = make(map[transport.ServerTransport]bool)\n\ts.mu.Unlock()\n}\n\n// SendHeader sends header metadata. It may be called at most once from a unary\n// RPC handler. The ctx is the RPC handler's Context or one derived from it.\nfunc SendHeader(ctx context.Context, md metadata.MD) error {\n\tif md.Len() == 0 {\n\t\treturn nil\n\t}\n\tstream, ok := transport.StreamFromContext(ctx)\n\tif !ok {\n\t\treturn fmt.Errorf(\"grpc: failed to fetch the stream from the context %v\", ctx)\n\t}\n\tt := stream.ServerTransport()\n\tif t == nil {\n\t\tgrpclog.Fatalf(\"grpc: SendHeader: %v has no ServerTransport to send header metadata.\", stream)\n\t}\n\treturn t.WriteHeader(stream, md)\n}\n\n// SetTrailer sets the trailer metadata that will be sent when an RPC returns.\n// It may be called at most once from a unary RPC handler. The ctx is the RPC\n// handler's Context or one derived from it.\nfunc SetTrailer(ctx context.Context, md metadata.MD) error {\n\tif md.Len() == 0 {\n\t\treturn nil\n\t}\n\tstream, ok := transport.StreamFromContext(ctx)\n\tif !ok {\n\t\treturn fmt.Errorf(\"grpc: failed to fetch the stream from the context %v\", ctx)\n\t}\n\treturn stream.SetTrailer(md)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/stream.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\npackage grpc\n\nimport (\n\t\"errors\"\n\t\"io\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/metadata\"\n\t\"google.golang.org/grpc/transport\"\n)\n\ntype streamHandler func(srv interface{}, stream ServerStream) error\n\n// StreamDesc represents a streaming RPC service's method specification.\ntype StreamDesc struct {\n\tStreamName string\n\tHandler    streamHandler\n\n\t// At least one of these is true.\n\tServerStreams bool\n\tClientStreams bool\n}\n\n// Stream defines the common interface a client or server stream has to satisfy.\ntype Stream interface {\n\t// Context returns the context for this stream.\n\tContext() context.Context\n\t// SendMsg blocks until it sends m, the stream is done or the stream\n\t// breaks.\n\t// On error, it aborts the stream and returns an RPC status on client\n\t// side. On server side, it simply returns the error to the caller.\n\t// SendMsg is called by generated code.\n\tSendMsg(m interface{}) error\n\t// RecvMsg blocks until it receives a message or the stream is\n\t// done. On client side, it returns io.EOF when the stream is done. On\n\t// any other error, it aborts the streama nd returns an RPC status. 
On\n\t// server side, it simply returns the error to the caller.\n\tRecvMsg(m interface{}) error\n}\n\n// ClientStream defines the interface a client stream has to satify.\ntype ClientStream interface {\n\t// Header returns the header metedata received from the server if there\n\t// is any. It blocks if the metadata is not ready to read.\n\tHeader() (metadata.MD, error)\n\t// Trailer returns the trailer metadata from the server. It must be called\n\t// after stream.Recv() returns non-nil error (including io.EOF) for\n\t// bi-directional streaming and server streaming or stream.CloseAndRecv()\n\t// returns for client streaming in order to receive trailer metadata if\n\t// present. Otherwise, it could returns an empty MD even though trailer\n\t// is present.\n\tTrailer() metadata.MD\n\t// CloseSend closes the send direction of the stream. It closes the stream\n\t// when non-nil error is met.\n\tCloseSend() error\n\tStream\n}\n\n// NewClientStream creates a new Stream for the client side. This is called\n// by generated code.\nfunc NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {\n\t// TODO(zhaoq): CallOption is omitted. 
Add support when it is needed.\n\tcallHdr := &transport.CallHdr{\n\t\tHost:   cc.authority,\n\t\tMethod: method,\n\t}\n\tt, _, err := cc.wait(ctx, 0)\n\tif err != nil {\n\t\treturn nil, toRPCErr(err)\n\t}\n\ts, err := t.NewStream(ctx, callHdr)\n\tif err != nil {\n\t\treturn nil, toRPCErr(err)\n\t}\n\treturn &clientStream{\n\t\tt:     t,\n\t\ts:     s,\n\t\tp:     &parser{s: s},\n\t\tdesc:  desc,\n\t\tcodec: cc.dopts.codec,\n\t}, nil\n}\n\n// clientStream implements a client side Stream.\ntype clientStream struct {\n\tt     transport.ClientTransport\n\ts     *transport.Stream\n\tp     *parser\n\tdesc  *StreamDesc\n\tcodec Codec\n}\n\nfunc (cs *clientStream) Context() context.Context {\n\treturn cs.s.Context()\n}\n\nfunc (cs *clientStream) Header() (metadata.MD, error) {\n\tm, err := cs.s.Header()\n\tif err != nil {\n\t\tif _, ok := err.(transport.ConnectionError); !ok {\n\t\t\tcs.t.CloseStream(cs.s, err)\n\t\t}\n\t}\n\treturn m, err\n}\n\nfunc (cs *clientStream) Trailer() metadata.MD {\n\treturn cs.s.Trailer()\n}\n\nfunc (cs *clientStream) SendMsg(m interface{}) (err error) {\n\tdefer func() {\n\t\tif err == nil || err == io.EOF {\n\t\t\treturn\n\t\t}\n\t\tif _, ok := err.(transport.ConnectionError); !ok {\n\t\t\tcs.t.CloseStream(cs.s, err)\n\t\t}\n\t\terr = toRPCErr(err)\n\t}()\n\tout, err := encode(cs.codec, m, compressionNone)\n\tif err != nil {\n\t\treturn transport.StreamErrorf(codes.Internal, \"grpc: %v\", err)\n\t}\n\treturn cs.t.Write(cs.s, out, &transport.Options{Last: false})\n}\n\nfunc (cs *clientStream) RecvMsg(m interface{}) (err error) {\n\terr = recv(cs.p, cs.codec, m)\n\tif err == nil {\n\t\tif !cs.desc.ClientStreams || cs.desc.ServerStreams {\n\t\t\treturn\n\t\t}\n\t\t// Special handling for client streaming rpc.\n\t\terr = recv(cs.p, cs.codec, m)\n\t\tcs.t.CloseStream(cs.s, err)\n\t\tif err == nil {\n\t\t\treturn toRPCErr(errors.New(\"grpc: client streaming protocol violation: get <nil>, want <EOF>\"))\n\t\t}\n\t\tif err == io.EOF {\n\t\t\tif 
cs.s.StatusCode() == codes.OK {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn Errorf(cs.s.StatusCode(), cs.s.StatusDesc())\n\t\t}\n\t\treturn toRPCErr(err)\n\t}\n\tif _, ok := err.(transport.ConnectionError); !ok {\n\t\tcs.t.CloseStream(cs.s, err)\n\t}\n\tif err == io.EOF {\n\t\tif cs.s.StatusCode() == codes.OK {\n\t\t\t// Returns io.EOF to indicate the end of the stream.\n\t\t\treturn\n\t\t}\n\t\treturn Errorf(cs.s.StatusCode(), cs.s.StatusDesc())\n\t}\n\treturn toRPCErr(err)\n}\n\nfunc (cs *clientStream) CloseSend() (err error) {\n\terr = cs.t.Write(cs.s, nil, &transport.Options{Last: true})\n\tif err == nil || err == io.EOF {\n\t\treturn\n\t}\n\tif _, ok := err.(transport.ConnectionError); !ok {\n\t\tcs.t.CloseStream(cs.s, err)\n\t}\n\terr = toRPCErr(err)\n\treturn\n}\n\n// ServerStream defines the interface a server stream has to satisfy.\ntype ServerStream interface {\n\t// SendHeader sends the header metadata. It should not be called\n\t// after SendProto. It fails if called multiple times or if\n\t// called after SendProto.\n\tSendHeader(metadata.MD) error\n\t// SetTrailer sets the trailer metadata which will be sent with the\n\t// RPC status.\n\tSetTrailer(metadata.MD)\n\tStream\n}\n\n// serverStream implements a server side Stream.\ntype serverStream struct {\n\tt          transport.ServerTransport\n\ts          *transport.Stream\n\tp          *parser\n\tcodec      Codec\n\tstatusCode codes.Code\n\tstatusDesc string\n}\n\nfunc (ss *serverStream) Context() context.Context {\n\treturn ss.s.Context()\n}\n\nfunc (ss *serverStream) SendHeader(md metadata.MD) error {\n\treturn ss.t.WriteHeader(ss.s, md)\n}\n\nfunc (ss *serverStream) SetTrailer(md metadata.MD) {\n\tif md.Len() == 0 {\n\t\treturn\n\t}\n\tss.s.SetTrailer(md)\n\treturn\n}\n\nfunc (ss *serverStream) SendMsg(m interface{}) error {\n\tout, err := encode(ss.codec, m, compressionNone)\n\tif err != nil {\n\t\terr = transport.StreamErrorf(codes.Internal, \"grpc: %v\", err)\n\t\treturn err\n\t}\n\treturn 
ss.t.Write(ss.s, out, &transport.Options{Last: false})\n}\n\nfunc (ss *serverStream) RecvMsg(m interface{}) error {\n\treturn recv(ss.p, ss.codec, m)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: perf.proto\n// DO NOT EDIT!\n\n/*\nPackage codec_perf is a generated protocol buffer package.\n\nIt is generated from these files:\n\tperf.proto\n\nIt has these top-level messages:\n\tBuffer\n*/\npackage codec_perf\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = math.Inf\n\n// Buffer is a message that contains a body of bytes that is used to exercise\n// encoding and decoding overheads.\ntype Buffer struct {\n\tBody             []byte `protobuf:\"bytes,1,opt,name=body\" json:\"body,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Buffer) Reset()         { *m = Buffer{} }\nfunc (m *Buffer) String() string { return proto.CompactTextString(m) }\nfunc (*Buffer) ProtoMessage()    {}\n\nfunc (m *Buffer) GetBody() []byte {\n\tif m != nil {\n\t\treturn m.Body\n\t}\n\treturn nil\n}\n\nfunc init() {\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/test/codec_perf/perf.proto",
    "content": "// Messages used for performance tests that may not reference grpc directly for\n// reasons of import cycles.\nsyntax = \"proto2\";\n\npackage codec.perf;\n\n// Buffer is a message that contains a body of bytes that is used to exercise\n// encoding and decoding overheads.\nmessage Buffer {\n  optional bytes body = 1;\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: src/google.golang.org/grpc/test/grpc_testing/test.proto\n// DO NOT EDIT!\n\n/*\nPackage grpc_testing is a generated protocol buffer package.\n\nIt is generated from these files:\n\tsrc/google.golang.org/grpc/test/grpc_testing/test.proto\n\nIt has these top-level messages:\n\tEmpty\n\tPayload\n\tSimpleRequest\n\tSimpleResponse\n\tStreamingInputCallRequest\n\tStreamingInputCallResponse\n\tResponseParameters\n\tStreamingOutputCallRequest\n\tStreamingOutputCallResponse\n*/\npackage grpc_testing\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport math \"math\"\n\nimport (\n\tcontext \"golang.org/x/net/context\"\n\tgrpc \"google.golang.org/grpc\"\n)\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ context.Context\nvar _ grpc.ClientConn\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = math.Inf\n\n// The type of payload that should be returned.\ntype PayloadType int32\n\nconst (\n\t// Compressable text format.\n\tPayloadType_COMPRESSABLE PayloadType = 0\n\t// Uncompressable binary format.\n\tPayloadType_UNCOMPRESSABLE PayloadType = 1\n\t// Randomly chosen from all other formats defined in this enum.\n\tPayloadType_RANDOM PayloadType = 2\n)\n\nvar PayloadType_name = map[int32]string{\n\t0: \"COMPRESSABLE\",\n\t1: \"UNCOMPRESSABLE\",\n\t2: \"RANDOM\",\n}\nvar PayloadType_value = map[string]int32{\n\t\"COMPRESSABLE\":   0,\n\t\"UNCOMPRESSABLE\": 1,\n\t\"RANDOM\":         2,\n}\n\nfunc (x PayloadType) Enum() *PayloadType {\n\tp := new(PayloadType)\n\t*p = x\n\treturn p\n}\nfunc (x PayloadType) String() string {\n\treturn proto.EnumName(PayloadType_name, int32(x))\n}\nfunc (x *PayloadType) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(PayloadType_value, data, \"PayloadType\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = PayloadType(value)\n\treturn nil\n}\n\ntype Empty struct 
{\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Empty) Reset()         { *m = Empty{} }\nfunc (m *Empty) String() string { return proto.CompactTextString(m) }\nfunc (*Empty) ProtoMessage()    {}\n\n// A block of data, to simply increase gRPC message size.\ntype Payload struct {\n\t// The type of data in body.\n\tType *PayloadType `protobuf:\"varint,1,opt,name=type,enum=grpc.testing.PayloadType\" json:\"type,omitempty\"`\n\t// Primary contents of payload.\n\tBody             []byte `protobuf:\"bytes,2,opt,name=body\" json:\"body,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Payload) Reset()         { *m = Payload{} }\nfunc (m *Payload) String() string { return proto.CompactTextString(m) }\nfunc (*Payload) ProtoMessage()    {}\n\nfunc (m *Payload) GetType() PayloadType {\n\tif m != nil && m.Type != nil {\n\t\treturn *m.Type\n\t}\n\treturn PayloadType_COMPRESSABLE\n}\n\nfunc (m *Payload) GetBody() []byte {\n\tif m != nil {\n\t\treturn m.Body\n\t}\n\treturn nil\n}\n\n// Unary request.\ntype SimpleRequest struct {\n\t// Desired payload type in the response from the server.\n\t// If response_type is RANDOM, server randomly chooses one from other formats.\n\tResponseType *PayloadType `protobuf:\"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType\" json:\"response_type,omitempty\"`\n\t// Desired payload size in the response from the server.\n\t// If response_type is COMPRESSABLE, this denotes the size before compression.\n\tResponseSize *int32 `protobuf:\"varint,2,opt,name=response_size\" json:\"response_size,omitempty\"`\n\t// Optional input payload sent along with the request.\n\tPayload *Payload `protobuf:\"bytes,3,opt,name=payload\" json:\"payload,omitempty\"`\n\t// Whether SimpleResponse should include username.\n\tFillUsername *bool `protobuf:\"varint,4,opt,name=fill_username\" json:\"fill_username,omitempty\"`\n\t// Whether SimpleResponse should include OAuth scope.\n\tFillOauthScope   *bool  
`protobuf:\"varint,5,opt,name=fill_oauth_scope\" json:\"fill_oauth_scope,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *SimpleRequest) Reset()         { *m = SimpleRequest{} }\nfunc (m *SimpleRequest) String() string { return proto.CompactTextString(m) }\nfunc (*SimpleRequest) ProtoMessage()    {}\n\nfunc (m *SimpleRequest) GetResponseType() PayloadType {\n\tif m != nil && m.ResponseType != nil {\n\t\treturn *m.ResponseType\n\t}\n\treturn PayloadType_COMPRESSABLE\n}\n\nfunc (m *SimpleRequest) GetResponseSize() int32 {\n\tif m != nil && m.ResponseSize != nil {\n\t\treturn *m.ResponseSize\n\t}\n\treturn 0\n}\n\nfunc (m *SimpleRequest) GetPayload() *Payload {\n\tif m != nil {\n\t\treturn m.Payload\n\t}\n\treturn nil\n}\n\nfunc (m *SimpleRequest) GetFillUsername() bool {\n\tif m != nil && m.FillUsername != nil {\n\t\treturn *m.FillUsername\n\t}\n\treturn false\n}\n\nfunc (m *SimpleRequest) GetFillOauthScope() bool {\n\tif m != nil && m.FillOauthScope != nil {\n\t\treturn *m.FillOauthScope\n\t}\n\treturn false\n}\n\n// Unary response, as configured by the request.\ntype SimpleResponse struct {\n\t// Payload to increase message size.\n\tPayload *Payload `protobuf:\"bytes,1,opt,name=payload\" json:\"payload,omitempty\"`\n\t// The user the request came from, for verifying authentication was\n\t// successful when the client expected it.\n\tUsername *string `protobuf:\"bytes,2,opt,name=username\" json:\"username,omitempty\"`\n\t// OAuth scope.\n\tOauthScope       *string `protobuf:\"bytes,3,opt,name=oauth_scope\" json:\"oauth_scope,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *SimpleResponse) Reset()         { *m = SimpleResponse{} }\nfunc (m *SimpleResponse) String() string { return proto.CompactTextString(m) }\nfunc (*SimpleResponse) ProtoMessage()    {}\n\nfunc (m *SimpleResponse) GetPayload() *Payload {\n\tif m != nil {\n\t\treturn m.Payload\n\t}\n\treturn nil\n}\n\nfunc (m *SimpleResponse) GetUsername() string {\n\tif m != nil 
&& m.Username != nil {\n\t\treturn *m.Username\n\t}\n\treturn \"\"\n}\n\nfunc (m *SimpleResponse) GetOauthScope() string {\n\tif m != nil && m.OauthScope != nil {\n\t\treturn *m.OauthScope\n\t}\n\treturn \"\"\n}\n\n// Client-streaming request.\ntype StreamingInputCallRequest struct {\n\t// Optional input payload sent along with the request.\n\tPayload          *Payload `protobuf:\"bytes,1,opt,name=payload\" json:\"payload,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *StreamingInputCallRequest) Reset()         { *m = StreamingInputCallRequest{} }\nfunc (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) }\nfunc (*StreamingInputCallRequest) ProtoMessage()    {}\n\nfunc (m *StreamingInputCallRequest) GetPayload() *Payload {\n\tif m != nil {\n\t\treturn m.Payload\n\t}\n\treturn nil\n}\n\n// Client-streaming response.\ntype StreamingInputCallResponse struct {\n\t// Aggregated size of payloads received from the client.\n\tAggregatedPayloadSize *int32 `protobuf:\"varint,1,opt,name=aggregated_payload_size\" json:\"aggregated_payload_size,omitempty\"`\n\tXXX_unrecognized      []byte `json:\"-\"`\n}\n\nfunc (m *StreamingInputCallResponse) Reset()         { *m = StreamingInputCallResponse{} }\nfunc (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) }\nfunc (*StreamingInputCallResponse) ProtoMessage()    {}\n\nfunc (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 {\n\tif m != nil && m.AggregatedPayloadSize != nil {\n\t\treturn *m.AggregatedPayloadSize\n\t}\n\treturn 0\n}\n\n// Configuration for a particular response.\ntype ResponseParameters struct {\n\t// Desired payload sizes in responses from the server.\n\t// If response_type is COMPRESSABLE, this denotes the size before compression.\n\tSize *int32 `protobuf:\"varint,1,opt,name=size\" json:\"size,omitempty\"`\n\t// Desired interval between consecutive responses in the response stream in\n\t// microseconds.\n\tIntervalUs  
     *int32 `protobuf:\"varint,2,opt,name=interval_us\" json:\"interval_us,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *ResponseParameters) Reset()         { *m = ResponseParameters{} }\nfunc (m *ResponseParameters) String() string { return proto.CompactTextString(m) }\nfunc (*ResponseParameters) ProtoMessage()    {}\n\nfunc (m *ResponseParameters) GetSize() int32 {\n\tif m != nil && m.Size != nil {\n\t\treturn *m.Size\n\t}\n\treturn 0\n}\n\nfunc (m *ResponseParameters) GetIntervalUs() int32 {\n\tif m != nil && m.IntervalUs != nil {\n\t\treturn *m.IntervalUs\n\t}\n\treturn 0\n}\n\n// Server-streaming request.\ntype StreamingOutputCallRequest struct {\n\t// Desired payload type in the response from the server.\n\t// If response_type is RANDOM, the payload from each response in the stream\n\t// might be of different types. This is to simulate a mixed type of payload\n\t// stream.\n\tResponseType *PayloadType `protobuf:\"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType\" json:\"response_type,omitempty\"`\n\t// Configuration for each expected response message.\n\tResponseParameters []*ResponseParameters `protobuf:\"bytes,2,rep,name=response_parameters\" json:\"response_parameters,omitempty\"`\n\t// Optional input payload sent along with the request.\n\tPayload          *Payload `protobuf:\"bytes,3,opt,name=payload\" json:\"payload,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *StreamingOutputCallRequest) Reset()         { *m = StreamingOutputCallRequest{} }\nfunc (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) }\nfunc (*StreamingOutputCallRequest) ProtoMessage()    {}\n\nfunc (m *StreamingOutputCallRequest) GetResponseType() PayloadType {\n\tif m != nil && m.ResponseType != nil {\n\t\treturn *m.ResponseType\n\t}\n\treturn PayloadType_COMPRESSABLE\n}\n\nfunc (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters {\n\tif m != nil {\n\t\treturn 
m.ResponseParameters\n\t}\n\treturn nil\n}\n\nfunc (m *StreamingOutputCallRequest) GetPayload() *Payload {\n\tif m != nil {\n\t\treturn m.Payload\n\t}\n\treturn nil\n}\n\n// Server-streaming response, as configured by the request and parameters.\ntype StreamingOutputCallResponse struct {\n\t// Payload to increase response size.\n\tPayload          *Payload `protobuf:\"bytes,1,opt,name=payload\" json:\"payload,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *StreamingOutputCallResponse) Reset()         { *m = StreamingOutputCallResponse{} }\nfunc (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) }\nfunc (*StreamingOutputCallResponse) ProtoMessage()    {}\n\nfunc (m *StreamingOutputCallResponse) GetPayload() *Payload {\n\tif m != nil {\n\t\treturn m.Payload\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tproto.RegisterEnum(\"grpc.testing.PayloadType\", PayloadType_name, PayloadType_value)\n}\n\n// Client API for TestService service\n\ntype TestServiceClient interface {\n\t// One empty request followed by one empty response.\n\tEmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)\n\t// One request followed by one response.\n\t// The server returns the client payload as-is.\n\tUnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error)\n\t// One request followed by a sequence of responses (streamed download).\n\t// The server returns the payload with client desired type and sizes.\n\tStreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error)\n\t// A sequence of requests followed by one response (streamed upload).\n\t// The server returns the aggregated size of client payload as the result.\n\tStreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error)\n\t// A sequence of requests with each request served by the 
server immediately.\n\t// As one request could lead to multiple responses, this interface\n\t// demonstrates the idea of full duplexing.\n\tFullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error)\n\t// A sequence of requests followed by a sequence of responses.\n\t// The server buffers all the client requests and then serves them in order. A\n\t// stream of responses are returned to the client when the server starts with\n\t// first request.\n\tHalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error)\n}\n\ntype testServiceClient struct {\n\tcc *grpc.ClientConn\n}\n\nfunc NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient {\n\treturn &testServiceClient{cc}\n}\n\nfunc (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {\n\tout := new(Empty)\n\terr := grpc.Invoke(ctx, \"/grpc.testing.TestService/EmptyCall\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) {\n\tout := new(SimpleResponse)\n\terr := grpc.Invoke(ctx, \"/grpc.testing.TestService/UnaryCall\", in, out, c.cc, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, \"/grpc.testing.TestService/StreamingOutputCall\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &testServiceStreamingOutputCallClient{stream}\n\tif err := x.ClientStream.SendMsg(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := x.ClientStream.CloseSend(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn x, nil\n}\n\ntype 
TestService_StreamingOutputCallClient interface {\n\tRecv() (*StreamingOutputCallResponse, error)\n\tgrpc.ClientStream\n}\n\ntype testServiceStreamingOutputCallClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) {\n\tm := new(StreamingOutputCallResponse)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, \"/grpc.testing.TestService/StreamingInputCall\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &testServiceStreamingInputCallClient{stream}\n\treturn x, nil\n}\n\ntype TestService_StreamingInputCallClient interface {\n\tSend(*StreamingInputCallRequest) error\n\tCloseAndRecv() (*StreamingInputCallResponse, error)\n\tgrpc.ClientStream\n}\n\ntype testServiceStreamingInputCallClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error {\n\treturn x.ClientStream.SendMsg(m)\n}\n\nfunc (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) {\n\tif err := x.ClientStream.CloseSend(); err != nil {\n\t\treturn nil, err\n\t}\n\tm := new(StreamingInputCallResponse)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, \"/grpc.testing.TestService/FullDuplexCall\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &testServiceFullDuplexCallClient{stream}\n\treturn x, nil\n}\n\ntype TestService_FullDuplexCallClient interface 
{\n\tSend(*StreamingOutputCallRequest) error\n\tRecv() (*StreamingOutputCallResponse, error)\n\tgrpc.ClientStream\n}\n\ntype testServiceFullDuplexCallClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error {\n\treturn x.ClientStream.SendMsg(m)\n}\n\nfunc (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) {\n\tm := new(StreamingOutputCallResponse)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) {\n\tstream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[3], c.cc, \"/grpc.testing.TestService/HalfDuplexCall\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &testServiceHalfDuplexCallClient{stream}\n\treturn x, nil\n}\n\ntype TestService_HalfDuplexCallClient interface {\n\tSend(*StreamingOutputCallRequest) error\n\tRecv() (*StreamingOutputCallResponse, error)\n\tgrpc.ClientStream\n}\n\ntype testServiceHalfDuplexCallClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error {\n\treturn x.ClientStream.SendMsg(m)\n}\n\nfunc (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) {\n\tm := new(StreamingOutputCallResponse)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n// Server API for TestService service\n\ntype TestServiceServer interface {\n\t// One empty request followed by one empty response.\n\tEmptyCall(context.Context, *Empty) (*Empty, error)\n\t// One request followed by one response.\n\t// The server returns the client payload as-is.\n\tUnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error)\n\t// One request followed by a sequence of responses (streamed download).\n\t// The server returns 
the payload with client desired type and sizes.\n\tStreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error\n\t// A sequence of requests followed by one response (streamed upload).\n\t// The server returns the aggregated size of client payload as the result.\n\tStreamingInputCall(TestService_StreamingInputCallServer) error\n\t// A sequence of requests with each request served by the server immediately.\n\t// As one request could lead to multiple responses, this interface\n\t// demonstrates the idea of full duplexing.\n\tFullDuplexCall(TestService_FullDuplexCallServer) error\n\t// A sequence of requests followed by a sequence of responses.\n\t// The server buffers all the client requests and then serves them in order. A\n\t// stream of responses are returned to the client when the server starts with\n\t// first request.\n\tHalfDuplexCall(TestService_HalfDuplexCallServer) error\n}\n\nfunc RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) {\n\ts.RegisterService(&_TestService_serviceDesc, srv)\n}\n\nfunc _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(Empty)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(TestServiceServer).EmptyCall(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {\n\tin := new(SimpleRequest)\n\tif err := codec.Unmarshal(buf, in); err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := srv.(TestServiceServer).UnaryCall(ctx, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error {\n\tm := new(StreamingOutputCallRequest)\n\tif err := stream.RecvMsg(m); err != nil {\n\t\treturn err\n\t}\n\treturn 
srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream})\n}\n\ntype TestService_StreamingOutputCallServer interface {\n\tSend(*StreamingOutputCallResponse) error\n\tgrpc.ServerStream\n}\n\ntype testServiceStreamingOutputCallServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error {\n\treturn srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream})\n}\n\ntype TestService_StreamingInputCallServer interface {\n\tSendAndClose(*StreamingInputCallResponse) error\n\tRecv() (*StreamingInputCallRequest, error)\n\tgrpc.ServerStream\n}\n\ntype testServiceStreamingInputCallServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) {\n\tm := new(StreamingInputCallRequest)\n\tif err := x.ServerStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error {\n\treturn srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream})\n}\n\ntype TestService_FullDuplexCallServer interface {\n\tSend(*StreamingOutputCallResponse) error\n\tRecv() (*StreamingOutputCallRequest, error)\n\tgrpc.ServerStream\n}\n\ntype testServiceFullDuplexCallServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) {\n\tm := new(StreamingOutputCallRequest)\n\tif err := x.ServerStream.RecvMsg(m); err != 
nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error {\n\treturn srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream})\n}\n\ntype TestService_HalfDuplexCallServer interface {\n\tSend(*StreamingOutputCallResponse) error\n\tRecv() (*StreamingOutputCallRequest, error)\n\tgrpc.ServerStream\n}\n\ntype testServiceHalfDuplexCallServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) {\n\tm := new(StreamingOutputCallRequest)\n\tif err := x.ServerStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nvar _TestService_serviceDesc = grpc.ServiceDesc{\n\tServiceName: \"grpc.testing.TestService\",\n\tHandlerType: (*TestServiceServer)(nil),\n\tMethods: []grpc.MethodDesc{\n\t\t{\n\t\t\tMethodName: \"EmptyCall\",\n\t\t\tHandler:    _TestService_EmptyCall_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"UnaryCall\",\n\t\t\tHandler:    _TestService_UnaryCall_Handler,\n\t\t},\n\t},\n\tStreams: []grpc.StreamDesc{\n\t\t{\n\t\t\tStreamName:    \"StreamingOutputCall\",\n\t\t\tHandler:       _TestService_StreamingOutputCall_Handler,\n\t\t\tServerStreams: true,\n\t\t},\n\t\t{\n\t\t\tStreamName:    \"StreamingInputCall\",\n\t\t\tHandler:       _TestService_StreamingInputCall_Handler,\n\t\t\tClientStreams: true,\n\t\t},\n\t\t{\n\t\t\tStreamName:    \"FullDuplexCall\",\n\t\t\tHandler:       _TestService_FullDuplexCall_Handler,\n\t\t\tServerStreams: true,\n\t\t\tClientStreams: true,\n\t\t},\n\t\t{\n\t\t\tStreamName:    \"HalfDuplexCall\",\n\t\t\tHandler:       _TestService_HalfDuplexCall_Handler,\n\t\t\tServerStreams: true,\n\t\t\tClientStreams: true,\n\t\t},\n\t},\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/test/grpc_testing/test.proto",
    "content": "// An integration test service that covers all the method signature permutations\n// of unary/streaming requests/responses.\nsyntax = \"proto2\";\n\npackage grpc.testing;\n\nmessage Empty {}\n\n// The type of payload that should be returned.\nenum PayloadType {\n  // Compressable text format.\n  COMPRESSABLE = 0;\n\n  // Uncompressable binary format.\n  UNCOMPRESSABLE = 1;\n\n  // Randomly chosen from all other formats defined in this enum.\n  RANDOM = 2;\n}\n\n// A block of data, to simply increase gRPC message size.\nmessage Payload {\n  // The type of data in body.\n  optional PayloadType type = 1;\n  // Primary contents of payload.\n  optional bytes body = 2;\n}\n\n// Unary request.\nmessage SimpleRequest {\n  // Desired payload type in the response from the server.\n  // If response_type is RANDOM, server randomly chooses one from other formats.\n  optional PayloadType response_type = 1;\n\n  // Desired payload size in the response from the server.\n  // If response_type is COMPRESSABLE, this denotes the size before compression.\n  optional int32 response_size = 2;\n\n  // Optional input payload sent along with the request.\n  optional Payload payload = 3;\n\n  // Whether SimpleResponse should include username.\n  optional bool fill_username = 4;\n\n  // Whether SimpleResponse should include OAuth scope.\n  optional bool fill_oauth_scope = 5;\n}\n\n// Unary response, as configured by the request.\nmessage SimpleResponse {\n  // Payload to increase message size.\n  optional Payload payload = 1;\n\n  // The user the request came from, for verifying authentication was\n  // successful when the client expected it.\n  optional string username = 2;\n  \n  // OAuth scope.\n  optional string oauth_scope = 3;\n}\n\n// Client-streaming request.\nmessage StreamingInputCallRequest {\n  // Optional input payload sent along with the request.\n  optional Payload payload = 1;\n\n  // Not expecting any payload from the response.\n}\n\n// Client-streaming 
response.\nmessage StreamingInputCallResponse {\n  // Aggregated size of payloads received from the client.\n  optional int32 aggregated_payload_size = 1;\n}\n\n// Configuration for a particular response.\nmessage ResponseParameters {\n  // Desired payload sizes in responses from the server.\n  // If response_type is COMPRESSABLE, this denotes the size before compression.\n  optional int32 size = 1;\n\n  // Desired interval between consecutive responses in the response stream in\n  // microseconds.\n  optional int32 interval_us = 2;\n}\n\n// Server-streaming request.\nmessage StreamingOutputCallRequest {\n  // Desired payload type in the response from the server.\n  // If response_type is RANDOM, the payload from each response in the stream\n  // might be of different types. This is to simulate a mixed type of payload\n  // stream.\n  optional PayloadType response_type = 1;\n\n  // Configuration for each expected response message.\n  repeated ResponseParameters response_parameters = 2;\n\n  // Optional input payload sent along with the request.\n  optional Payload payload = 3;\n}\n\n// Server-streaming response, as configured by the request and parameters.\nmessage StreamingOutputCallResponse {\n  // Payload to increase response size.\n  optional Payload payload = 1;\n}\n\n// A simple service to test the various types of RPCs and experiment with\n// performance with various types of payload.\nservice TestService {\n  // One empty request followed by one empty response.\n  rpc EmptyCall(Empty) returns (Empty);\n\n  // One request followed by one response.\n  // The server returns the client payload as-is.\n  rpc UnaryCall(SimpleRequest) returns (SimpleResponse);\n\n  // One request followed by a sequence of responses (streamed download).\n  // The server returns the payload with client desired type and sizes.\n  rpc StreamingOutputCall(StreamingOutputCallRequest)\n      returns (stream StreamingOutputCallResponse);\n\n  // A sequence of requests followed by one 
response (streamed upload).\n  // The server returns the aggregated size of client payload as the result.\n  rpc StreamingInputCall(stream StreamingInputCallRequest)\n      returns (StreamingInputCallResponse);\n\n  // A sequence of requests with each request served by the server immediately.\n  // As one request could lead to multiple responses, this interface\n  // demonstrates the idea of full duplexing.\n  rpc FullDuplexCall(stream StreamingOutputCallRequest)\n      returns (stream StreamingOutputCallResponse);\n\n  // A sequence of requests followed by a sequence of responses.\n  // The server buffers all the client requests and then serves them in order. A\n  // stream of responses are returned to the client when the server starts with\n  // first request.\n  rpc HalfDuplexCall(stream StreamingOutputCallRequest)\n      returns (stream StreamingOutputCallResponse);\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/transport/control.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\npackage transport\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com/bradfitz/http2\"\n)\n\nconst (\n\t// The default value of flow control window size in HTTP2 spec.\n\tdefaultWindowSize = 65535\n\t// The initial window size for flow control.\n\tinitialWindowSize     = defaultWindowSize      // for an RPC\n\tinitialConnWindowSize = defaultWindowSize * 16 // for a connection\n)\n\n// The following defines various control items which could flow through\n// the control buffer of transport. 
They represent different aspects of\n// control tasks, e.g., flow control, settings, streaming resetting, etc.\ntype windowUpdate struct {\n\tstreamID  uint32\n\tincrement uint32\n}\n\nfunc (windowUpdate) isItem() bool {\n\treturn true\n}\n\ntype settings struct {\n\tack     bool\n\tsetting []http2.Setting\n}\n\nfunc (settings) isItem() bool {\n\treturn true\n}\n\ntype resetStream struct {\n\tstreamID uint32\n\tcode     http2.ErrCode\n}\n\nfunc (resetStream) isItem() bool {\n\treturn true\n}\n\ntype flushIO struct {\n}\n\nfunc (flushIO) isItem() bool {\n\treturn true\n}\n\ntype ping struct {\n\tack bool\n}\n\nfunc (ping) isItem() bool {\n\treturn true\n}\n\n// quotaPool is a pool which accumulates the quota and sends it to acquire()\n// when it is available.\ntype quotaPool struct {\n\tc chan int\n\n\tmu    sync.Mutex\n\tquota int\n}\n\n// newQuotaPool creates a quotaPool which has quota q available to consume.\nfunc newQuotaPool(q int) *quotaPool {\n\tqb := &quotaPool{c: make(chan int, 1)}\n\tqb.c <- q\n\treturn qb\n}\n\n// add adds n to the available quota and tries to send it on acquire.\nfunc (qb *quotaPool) add(n int) {\n\tqb.mu.Lock()\n\tdefer qb.mu.Unlock()\n\tqb.quota += n\n\tif qb.quota <= 0 {\n\t\treturn\n\t}\n\tselect {\n\tcase qb.c <- qb.quota:\n\t\tqb.quota = 0\n\tdefault:\n\t}\n}\n\n// cancel cancels the pending quota sent on acquire, if any.\nfunc (qb *quotaPool) cancel() {\n\tqb.mu.Lock()\n\tdefer qb.mu.Unlock()\n\tselect {\n\tcase n := <-qb.c:\n\t\tqb.quota += n\n\tdefault:\n\t}\n}\n\n// reset cancels the pending quota sent on acquired, incremented by v and sends\n// it back on acquire.\nfunc (qb *quotaPool) reset(v int) {\n\tqb.mu.Lock()\n\tdefer qb.mu.Unlock()\n\tselect {\n\tcase n := <-qb.c:\n\t\tqb.quota += n\n\tdefault:\n\t}\n\tqb.quota += v\n\tif qb.quota <= 0 {\n\t\treturn\n\t}\n\tselect {\n\tcase qb.c <- qb.quota:\n\t\tqb.quota = 0\n\tdefault:\n\t}\n}\n\n// acquire returns the channel on which available quota amounts are sent.\nfunc (qb 
*quotaPool) acquire() <-chan int {\n\treturn qb.c\n}\n\n// inFlow deals with inbound flow control\ntype inFlow struct {\n\t// The inbound flow control limit for pending data.\n\tlimit uint32\n\t// conn points to the shared connection-level inFlow that is shared\n\t// by all streams on that conn. It is nil for the inFlow on the conn\n\t// directly.\n\tconn *inFlow\n\n\tmu sync.Mutex\n\t// pendingData is the overall data which have been received but not been\n\t// consumed by applications.\n\tpendingData uint32\n\t// The amount of data the application has consumed but grpc has not sent\n\t// window update for them. Used to reduce window update frequency.\n\tpendingUpdate uint32\n}\n\n// onData is invoked when some data frame is received. It increments not only its\n// own pendingData but also that of the associated connection-level flow.\nfunc (f *inFlow) onData(n uint32) error {\n\tif n == 0 {\n\t\treturn nil\n\t}\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\tif f.pendingData+f.pendingUpdate+n > f.limit {\n\t\treturn fmt.Errorf(\"recieved %d-bytes data exceeding the limit %d bytes\", f.pendingData+f.pendingUpdate+n, f.limit)\n\t}\n\tif f.conn != nil {\n\t\tif err := f.conn.onData(n); err != nil {\n\t\t\treturn ConnectionErrorf(\"%v\", err)\n\t\t}\n\t}\n\tf.pendingData += n\n\treturn nil\n}\n\n// connOnRead updates the connection level states when the application consumes data.\nfunc (f *inFlow) connOnRead(n uint32) uint32 {\n\tif n == 0 || f.conn != nil {\n\t\treturn 0\n\t}\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\tf.pendingData -= n\n\tf.pendingUpdate += n\n\tif f.pendingUpdate >= f.limit/4 {\n\t\tret := f.pendingUpdate\n\t\tf.pendingUpdate = 0\n\t\treturn ret\n\t}\n\treturn 0\n}\n\n// onRead is invoked when the application reads the data. 
It returns the window updates\n// for both stream and connection level.\nfunc (f *inFlow) onRead(n uint32) (swu, cwu uint32) {\n\tif n == 0 {\n\t\treturn\n\t}\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\tif f.pendingData == 0 {\n\t\t// pendingData has been adjusted by restoreConn.\n\t\treturn\n\t}\n\tf.pendingData -= n\n\tf.pendingUpdate += n\n\tif f.pendingUpdate >= f.limit/4 {\n\t\tswu = f.pendingUpdate\n\t\tf.pendingUpdate = 0\n\t}\n\tcwu = f.conn.connOnRead(n)\n\treturn\n}\n\n// restoreConn is invoked when a stream is terminated. It removes its stake in\n// the connection-level flow and resets its own state.\nfunc (f *inFlow) restoreConn() uint32 {\n\tif f.conn == nil {\n\t\treturn 0\n\t}\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\tn := f.pendingData\n\tf.pendingData = 0\n\tf.pendingUpdate = 0\n\treturn f.conn.connOnRead(n)\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/transport/http2_client.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\npackage transport\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"math\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/bradfitz/http2\"\n\t\"github.com/bradfitz/http2/hpack\"\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/grpc/grpclog\"\n\t\"google.golang.org/grpc/metadata\"\n)\n\n// http2Client implements the ClientTransport interface with HTTP2.\ntype http2Client struct {\n\ttarget string   // server name/addr\n\tconn   net.Conn // underlying communication channel\n\tnextID uint32   // the next stream ID to be used\n\n\t// writableChan synchronizes write access to the transport.\n\t// A writer acquires the write lock by sending a value on writableChan\n\t// and releases it by receiving from writableChan.\n\twritableChan chan int\n\t// shutdownChan is closed when Close is called.\n\t// Blocking operations should select on shutdownChan to avoid\n\t// blocking forever after Close.\n\t// TODO(zhaoq): Maybe have a channel context?\n\tshutdownChan chan struct{}\n\t// errorChan is closed to notify the I/O error to the caller.\n\terrorChan chan struct{}\n\n\tframer *framer\n\thBuf   *bytes.Buffer  // the buffer for HPACK encoding\n\thEnc   *hpack.Encoder // HPACK encoder\n\n\t// controlBuf delivers all the control related tasks (e.g., window\n\t// updates, reset streams, and various settings) to the controller.\n\tcontrolBuf 
*recvBuffer\n\tfc         *inFlow\n\t// sendQuotaPool provides flow control to outbound message.\n\tsendQuotaPool *quotaPool\n\t// streamsQuota limits the max number of concurrent streams.\n\tstreamsQuota *quotaPool\n\n\t// The scheme used: https if TLS is on, http otherwise.\n\tscheme string\n\n\tauthCreds []credentials.Credentials\n\n\tmu            sync.Mutex     // guard the following variables\n\tstate         transportState // the state of underlying connection\n\tactiveStreams map[uint32]*Stream\n\t// The max number of concurrent streams\n\tmaxStreams int\n\t// the per-stream outbound flow control window size set by the peer.\n\tstreamSendQuota uint32\n}\n\n// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2\n// and starts to receive messages on it. Non-nil error returns if construction\n// fails.\nfunc newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err error) {\n\tif opts.Dialer == nil {\n\t\t// Set the default Dialer.\n\t\topts.Dialer = func(addr string, timeout time.Duration) (net.Conn, error) {\n\t\t\treturn net.DialTimeout(\"tcp\", addr, timeout)\n\t\t}\n\t}\n\tscheme := \"http\"\n\tstartT := time.Now()\n\ttimeout := opts.Timeout\n\tconn, connErr := opts.Dialer(addr, timeout)\n\tif connErr != nil {\n\t\treturn nil, ConnectionErrorf(\"transport: %v\", connErr)\n\t}\n\tfor _, c := range opts.AuthOptions {\n\t\tif ccreds, ok := c.(credentials.TransportAuthenticator); ok {\n\t\t\tscheme = \"https\"\n\t\t\t// TODO(zhaoq): Now the first TransportAuthenticator is used if there are\n\t\t\t// multiple ones provided. Revisit this if it is not appropriate. 
Probably\n\t\t\t// place the ClientTransport construction into a separate function to make\n\t\t\t// things clear.\n\t\t\tif timeout > 0 {\n\t\t\t\ttimeout -= time.Since(startT)\n\t\t\t}\n\t\t\tconn, connErr = ccreds.ClientHandshake(addr, conn, timeout)\n\t\t\tbreak\n\t\t}\n\t}\n\tif connErr != nil {\n\t\treturn nil, ConnectionErrorf(\"transport: %v\", connErr)\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tconn.Close()\n\t\t}\n\t}()\n\t// Send connection preface to server.\n\tn, err := conn.Write(clientPreface)\n\tif err != nil {\n\t\treturn nil, ConnectionErrorf(\"transport: %v\", err)\n\t}\n\tif n != len(clientPreface) {\n\t\treturn nil, ConnectionErrorf(\"transport: preface mismatch, wrote %d bytes; want %d\", n, len(clientPreface))\n\t}\n\tframer := newFramer(conn)\n\tif initialWindowSize != defaultWindowSize {\n\t\terr = framer.writeSettings(true, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)})\n\t} else {\n\t\terr = framer.writeSettings(true)\n\t}\n\tif err != nil {\n\t\treturn nil, ConnectionErrorf(\"transport: %v\", err)\n\t}\n\t// Adjust the connection flow control window if needed.\n\tif delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {\n\t\tif err := framer.writeWindowUpdate(true, 0, delta); err != nil {\n\t\t\treturn nil, ConnectionErrorf(\"transport: %v\", err)\n\t\t}\n\t}\n\tvar buf bytes.Buffer\n\tt := &http2Client{\n\t\ttarget: addr,\n\t\tconn:   conn,\n\t\t// The client initiated stream id is odd starting from 1.\n\t\tnextID:          1,\n\t\twritableChan:    make(chan int, 1),\n\t\tshutdownChan:    make(chan struct{}),\n\t\terrorChan:       make(chan struct{}),\n\t\tframer:          framer,\n\t\thBuf:            &buf,\n\t\thEnc:            hpack.NewEncoder(&buf),\n\t\tcontrolBuf:      newRecvBuffer(),\n\t\tfc:              &inFlow{limit: initialConnWindowSize},\n\t\tsendQuotaPool:   newQuotaPool(defaultWindowSize),\n\t\tscheme:          scheme,\n\t\tstate:           
reachable,\n\t\tactiveStreams:   make(map[uint32]*Stream),\n\t\tauthCreds:       opts.AuthOptions,\n\t\tmaxStreams:      math.MaxInt32,\n\t\tstreamSendQuota: defaultWindowSize,\n\t}\n\tgo t.controller()\n\tt.writableChan <- 0\n\t// Start the reader goroutine for incoming message. The threading model\n\t// on receiving is that each transport has a dedicated goroutine which\n\t// reads HTTP2 frame from network. Then it dispatches the frame to the\n\t// corresponding stream entity.\n\tgo t.reader()\n\treturn t, nil\n}\n\nfunc (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr, sq bool) *Stream {\n\tfc := &inFlow{\n\t\tlimit: initialWindowSize,\n\t\tconn:  t.fc,\n\t}\n\t// TODO(zhaoq): Handle uint32 overflow of Stream.id.\n\ts := &Stream{\n\t\tid:            t.nextID,\n\t\tmethod:        callHdr.Method,\n\t\tbuf:           newRecvBuffer(),\n\t\tupdateStreams: sq,\n\t\tfc:            fc,\n\t\tsendQuotaPool: newQuotaPool(int(t.streamSendQuota)),\n\t\theaderChan:    make(chan struct{}),\n\t}\n\tt.nextID += 2\n\ts.windowHandler = func(n int) {\n\t\tt.updateWindow(s, uint32(n))\n\t}\n\t// Make a stream be able to cancel the pending operations by itself.\n\ts.ctx, s.cancel = context.WithCancel(ctx)\n\ts.dec = &recvBufferReader{\n\t\tctx:  s.ctx,\n\t\trecv: s.buf,\n\t}\n\treturn s\n}\n\n// NewStream creates a stream and register it into the transport as \"active\"\n// streams.\nfunc (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {\n\t// Record the timeout value on the context.\n\tvar timeout time.Duration\n\tif dl, ok := ctx.Deadline(); ok {\n\t\ttimeout = dl.Sub(time.Now())\n\t\tif timeout <= 0 {\n\t\t\treturn nil, ContextErr(context.DeadlineExceeded)\n\t\t}\n\t}\n\tauthData := make(map[string]string)\n\tfor _, c := range t.authCreds {\n\t\tdata, err := c.GetRequestMetadata(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, StreamErrorf(codes.InvalidArgument, \"transport: %v\", err)\n\t\t}\n\t\tfor k, v := range data 
{\n\t\t\tauthData[k] = v\n\t\t}\n\t}\n\tt.mu.Lock()\n\tif t.state != reachable {\n\t\tt.mu.Unlock()\n\t\treturn nil, ErrConnClosing\n\t}\n\tcheckStreamsQuota := t.streamsQuota != nil\n\tt.mu.Unlock()\n\tif checkStreamsQuota {\n\t\tsq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// Returns the quota balance back.\n\t\tif sq > 1 {\n\t\t\tt.streamsQuota.add(sq - 1)\n\t\t}\n\t}\n\tif _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil {\n\t\t// t.streamsQuota will be updated when t.CloseStream is invoked.\n\t\treturn nil, err\n\t}\n\tt.mu.Lock()\n\ts := t.newStream(ctx, callHdr, checkStreamsQuota)\n\tt.activeStreams[s.id] = s\n\tt.mu.Unlock()\n\t// HPACK encodes various headers. Note that once WriteField(...) is\n\t// called, the corresponding headers/continuation frame has to be sent\n\t// because hpack.Encoder is stateful.\n\tt.hBuf.Reset()\n\tt.hEnc.WriteField(hpack.HeaderField{Name: \":method\", Value: \"POST\"})\n\tt.hEnc.WriteField(hpack.HeaderField{Name: \":scheme\", Value: t.scheme})\n\tt.hEnc.WriteField(hpack.HeaderField{Name: \":path\", Value: callHdr.Method})\n\tt.hEnc.WriteField(hpack.HeaderField{Name: \":authority\", Value: callHdr.Host})\n\tt.hEnc.WriteField(hpack.HeaderField{Name: \"content-type\", Value: \"application/grpc\"})\n\tt.hEnc.WriteField(hpack.HeaderField{Name: \"te\", Value: \"trailers\"})\n\tif timeout > 0 {\n\t\tt.hEnc.WriteField(hpack.HeaderField{Name: \"grpc-timeout\", Value: timeoutEncode(timeout)})\n\t}\n\tfor k, v := range authData {\n\t\tt.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})\n\t}\n\tvar (\n\t\thasMD      bool\n\t\tendHeaders bool\n\t)\n\tif md, ok := metadata.FromContext(ctx); ok {\n\t\thasMD = true\n\t\tfor k, v := range md {\n\t\t\tt.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})\n\t\t}\n\t}\n\tfirst := true\n\t// Sends the headers in a single batch even when they span multiple frames.\n\tfor !endHeaders {\n\t\tsize := 
t.hBuf.Len()\n\t\tif size > http2MaxFrameLen {\n\t\t\tsize = http2MaxFrameLen\n\t\t} else {\n\t\t\tendHeaders = true\n\t\t}\n\t\tif first {\n\t\t\t// Sends a HeadersFrame to server to start a new stream.\n\t\t\tp := http2.HeadersFrameParam{\n\t\t\t\tStreamID:      s.id,\n\t\t\t\tBlockFragment: t.hBuf.Next(size),\n\t\t\t\tEndStream:     false,\n\t\t\t\tEndHeaders:    endHeaders,\n\t\t\t}\n\t\t\t// Do a force flush for the buffered frames iff it is the last headers frame\n\t\t\t// and there is header metadata to be sent. Otherwise, there is flushing until\n\t\t\t// the corresponding data frame is written.\n\t\t\terr = t.framer.writeHeaders(hasMD && endHeaders, p)\n\t\t\tfirst = false\n\t\t} else {\n\t\t\t// Sends Continuation frames for the leftover headers.\n\t\t\terr = t.framer.writeContinuation(hasMD && endHeaders, s.id, endHeaders, t.hBuf.Next(size))\n\t\t}\n\t\tif err != nil {\n\t\t\tt.notifyError(err)\n\t\t\treturn nil, ConnectionErrorf(\"transport: %v\", err)\n\t\t}\n\t}\n\tt.writableChan <- 0\n\treturn s, nil\n}\n\n// CloseStream clears the footprint of a stream when the stream is not needed any more.\n// This must not be executed in reader's goroutine.\nfunc (t *http2Client) CloseStream(s *Stream, err error) {\n\tt.mu.Lock()\n\tdelete(t.activeStreams, s.id)\n\tt.mu.Unlock()\n\tif s.updateStreams {\n\t\tt.streamsQuota.add(1)\n\t}\n\ts.mu.Lock()\n\tif q := s.fc.restoreConn(); q > 0 {\n\t\tt.controlBuf.put(&windowUpdate{0, q})\n\t}\n\tif s.state == streamDone {\n\t\ts.mu.Unlock()\n\t\treturn\n\t}\n\tif !s.headerDone {\n\t\tclose(s.headerChan)\n\t\ts.headerDone = true\n\t}\n\ts.state = streamDone\n\ts.mu.Unlock()\n\t// In case stream sending and receiving are invoked in separate\n\t// goroutines (e.g., bi-directional streaming), the caller needs\n\t// to call cancel on the stream to interrupt the blocking on\n\t// other goroutines.\n\ts.cancel()\n\tif _, ok := err.(StreamError); ok {\n\t\tt.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel})\n\t}\n}\n\n// 
Close kicks off the shutdown process of the transport. This should be called\n// only once on a transport. Once it is called, the transport should not be\n// accessed any more.\nfunc (t *http2Client) Close() (err error) {\n\tt.mu.Lock()\n\tif t.state == closing {\n\t\tt.mu.Unlock()\n\t\treturn errors.New(\"transport: Close() was already called\")\n\t}\n\tt.state = closing\n\tt.mu.Unlock()\n\tclose(t.shutdownChan)\n\terr = t.conn.Close()\n\tt.mu.Lock()\n\tstreams := t.activeStreams\n\tt.activeStreams = nil\n\tt.mu.Unlock()\n\t// Notify all active streams.\n\tfor _, s := range streams {\n\t\ts.mu.Lock()\n\t\tif !s.headerDone {\n\t\t\tclose(s.headerChan)\n\t\t\ts.headerDone = true\n\t\t}\n\t\ts.mu.Unlock()\n\t\ts.write(recvMsg{err: ErrConnClosing})\n\t}\n\treturn\n}\n\n// Write formats the data into HTTP2 data frame(s) and sends it out. The caller\n// should proceed only if Write returns nil.\n// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later\n// if it improves the performance.\nfunc (t *http2Client) Write(s *Stream, data []byte, opts *Options) error {\n\tr := bytes.NewBuffer(data)\n\tfor {\n\t\tvar p []byte\n\t\tif r.Len() > 0 {\n\t\t\tsize := http2MaxFrameLen\n\t\t\ts.sendQuotaPool.add(0)\n\t\t\t// Wait until the stream has some quota to send the data.\n\t\t\tsq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.sendQuotaPool.add(0)\n\t\t\t// Wait until the transport has some quota to send the data.\n\t\t\ttq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire())\n\t\t\tif err != nil {\n\t\t\t\tif _, ok := err.(StreamError); ok {\n\t\t\t\t\tt.sendQuotaPool.cancel()\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif sq < size {\n\t\t\t\tsize = sq\n\t\t\t}\n\t\t\tif tq < size {\n\t\t\t\tsize = tq\n\t\t\t}\n\t\t\tp = r.Next(size)\n\t\t\tps := len(p)\n\t\t\tif ps < sq {\n\t\t\t\t// Overbooked stream quota. 
Return it back.\n\t\t\t\ts.sendQuotaPool.add(sq - ps)\n\t\t\t}\n\t\t\tif ps < tq {\n\t\t\t\t// Overbooked transport quota. Return it back.\n\t\t\t\tt.sendQuotaPool.add(tq - ps)\n\t\t\t}\n\t\t}\n\t\tvar (\n\t\t\tendStream  bool\n\t\t\tforceFlush bool\n\t\t)\n\t\tif opts.Last && r.Len() == 0 {\n\t\t\tendStream = true\n\t\t}\n\t\t// Indicate there is a writer who is about to write a data frame.\n\t\tt.framer.adjustNumWriters(1)\n\t\t// Got some quota. Try to acquire writing privilege on the transport.\n\t\tif _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {\n\t\t\tif t.framer.adjustNumWriters(-1) == 0 {\n\t\t\t\t// This writer is the last one in this batch and has the\n\t\t\t\t// responsibility to flush the buffered frames. It queues\n\t\t\t\t// a flush request to controlBuf instead of flushing directly\n\t\t\t\t// in order to avoid the race with other writing or flushing.\n\t\t\t\tt.controlBuf.put(&flushIO{})\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tif r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 {\n\t\t\t// Do a force flush iff this is last frame for the entire gRPC message\n\t\t\t// and the caller is the only writer at this moment.\n\t\t\tforceFlush = true\n\t\t}\n\t\t// If WriteData fails, all the pending streams will be handled\n\t\t// by http2Client.Close(). 
No explicit CloseStream() needs to be\n\t\t// invoked.\n\t\tif err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil {\n\t\t\tt.notifyError(err)\n\t\t\treturn ConnectionErrorf(\"transport: %v\", err)\n\t\t}\n\t\tif t.framer.adjustNumWriters(-1) == 0 {\n\t\t\tt.framer.flushWrite()\n\t\t}\n\t\tt.writableChan <- 0\n\t\tif r.Len() == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\tif !opts.Last {\n\t\treturn nil\n\t}\n\ts.mu.Lock()\n\tif s.state != streamDone {\n\t\tif s.state == streamReadDone {\n\t\t\ts.state = streamDone\n\t\t} else {\n\t\t\ts.state = streamWriteDone\n\t\t}\n\t}\n\ts.mu.Unlock()\n\treturn nil\n}\n\nfunc (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tif t.activeStreams == nil {\n\t\t// The transport is closing.\n\t\treturn nil, false\n\t}\n\tif s, ok := t.activeStreams[f.Header().StreamID]; ok {\n\t\treturn s, true\n\t}\n\treturn nil, false\n}\n\n// updateWindow adjusts the inbound quota for the stream and the transport.\n// Window updates will deliver to the controller for sending when\n// the cumulative quota exceeds the corresponding threshold.\nfunc (t *http2Client) updateWindow(s *Stream, n uint32) {\n\tswu, cwu := s.fc.onRead(n)\n\tif swu > 0 {\n\t\tt.controlBuf.put(&windowUpdate{s.id, swu})\n\t}\n\tif cwu > 0 {\n\t\tt.controlBuf.put(&windowUpdate{0, cwu})\n\t}\n}\n\nfunc (t *http2Client) handleData(f *http2.DataFrame) {\n\t// Select the right stream to dispatch.\n\ts, ok := t.getStream(f)\n\tif !ok {\n\t\treturn\n\t}\n\tsize := len(f.Data())\n\tif err := s.fc.onData(uint32(size)); err != nil {\n\t\tif _, ok := err.(ConnectionError); ok {\n\t\t\tt.notifyError(err)\n\t\t\treturn\n\t\t}\n\t\ts.mu.Lock()\n\t\tif s.state == streamDone {\n\t\t\ts.mu.Unlock()\n\t\t\treturn\n\t\t}\n\t\ts.state = streamDone\n\t\ts.statusCode = codes.Internal\n\t\ts.statusDesc = err.Error()\n\t\ts.mu.Unlock()\n\t\ts.write(recvMsg{err: io.EOF})\n\t\tt.controlBuf.put(&resetStream{s.id, 
http2.ErrCodeFlowControl})\n\t\treturn\n\t}\n\t// TODO(bradfitz, zhaoq): A copy is required here because there is no\n\t// guarantee f.Data() is consumed before the arrival of next frame.\n\t// Can this copy be eliminated?\n\tdata := make([]byte, size)\n\tcopy(data, f.Data())\n\ts.write(recvMsg{data: data})\n}\n\nfunc (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {\n\ts, ok := t.getStream(f)\n\tif !ok {\n\t\treturn\n\t}\n\ts.mu.Lock()\n\tif s.state == streamDone {\n\t\ts.mu.Unlock()\n\t\treturn\n\t}\n\ts.state = streamDone\n\ts.statusCode, ok = http2RSTErrConvTab[http2.ErrCode(f.ErrCode)]\n\tif !ok {\n\t\tgrpclog.Println(\"transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error \", f.ErrCode)\n\t}\n\ts.mu.Unlock()\n\ts.write(recvMsg{err: io.EOF})\n}\n\nfunc (t *http2Client) handleSettings(f *http2.SettingsFrame) {\n\tif f.IsAck() {\n\t\treturn\n\t}\n\tf.ForeachSetting(func(s http2.Setting) error {\n\t\tif v, ok := f.Value(s.ID); ok {\n\t\t\tswitch s.ID {\n\t\t\tcase http2.SettingMaxConcurrentStreams:\n\t\t\t\t// TODO(zhaoq): This is a hack to avoid significant refactoring of the\n\t\t\t\t// code to deal with the unrealistic int32 overflow. 
Probably will try\n\t\t\t\t// to find a better way to handle this later.\n\t\t\t\tif v > math.MaxInt32 {\n\t\t\t\t\tv = math.MaxInt32\n\t\t\t\t}\n\t\t\t\tt.mu.Lock()\n\t\t\t\treset := t.streamsQuota != nil\n\t\t\t\tif !reset {\n\t\t\t\t\tt.streamsQuota = newQuotaPool(int(v))\n\t\t\t\t}\n\t\t\t\tms := t.maxStreams\n\t\t\t\tt.maxStreams = int(v)\n\t\t\t\tt.mu.Unlock()\n\t\t\t\tif reset {\n\t\t\t\t\tt.streamsQuota.reset(int(v) - ms)\n\t\t\t\t}\n\t\t\tcase http2.SettingInitialWindowSize:\n\t\t\t\tt.mu.Lock()\n\t\t\t\tfor _, s := range t.activeStreams {\n\t\t\t\t\t// Adjust the sending quota for each s.\n\t\t\t\t\ts.sendQuotaPool.reset(int(v - t.streamSendQuota))\n\t\t\t\t}\n\t\t\t\tt.streamSendQuota = v\n\t\t\t\tt.mu.Unlock()\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tt.controlBuf.put(&settings{ack: true})\n}\n\nfunc (t *http2Client) handlePing(f *http2.PingFrame) {\n\tt.controlBuf.put(&ping{true})\n}\n\nfunc (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {\n\t// TODO(zhaoq): GoAwayFrame handler to be implemented\"\n}\n\nfunc (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {\n\tid := f.Header().StreamID\n\tincr := f.Increment\n\tif id == 0 {\n\t\tt.sendQuotaPool.add(int(incr))\n\t\treturn\n\t}\n\tif s, ok := t.getStream(f); ok {\n\t\ts.sendQuotaPool.add(int(incr))\n\t}\n}\n\n// operateHeader takes action on the decoded headers. It returns the current\n// stream if there are remaining headers on the wire (in the following\n// Continuation frame).\nfunc (t *http2Client) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool) (pendingStream *Stream) {\n\tdefer func() {\n\t\tif pendingStream == nil {\n\t\t\thDec.state = decodeState{}\n\t\t}\n\t}()\n\tendHeaders, err := hDec.decodeClientHTTP2Headers(frame)\n\tif s == nil {\n\t\t// s has been closed.\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\ts.write(recvMsg{err: err})\n\t\t// Something wrong. 
Stops reading even when there is remaining.\n\t\treturn nil\n\t}\n\tif !endHeaders {\n\t\treturn s\n\t}\n\n\ts.mu.Lock()\n\tif !s.headerDone {\n\t\tif !endStream && len(hDec.state.mdata) > 0 {\n\t\t\ts.header = hDec.state.mdata\n\t\t}\n\t\tclose(s.headerChan)\n\t\ts.headerDone = true\n\t}\n\tif !endStream || s.state == streamDone {\n\t\ts.mu.Unlock()\n\t\treturn nil\n\t}\n\n\tif len(hDec.state.mdata) > 0 {\n\t\ts.trailer = hDec.state.mdata\n\t}\n\ts.state = streamDone\n\ts.statusCode = hDec.state.statusCode\n\ts.statusDesc = hDec.state.statusDesc\n\ts.mu.Unlock()\n\n\ts.write(recvMsg{err: io.EOF})\n\treturn nil\n}\n\n// reader runs as a separate goroutine in charge of reading data from network\n// connection.\n//\n// TODO(zhaoq): currently one reader per transport. Investigate whether this is\n// optimal.\n// TODO(zhaoq): Check the validity of the incoming frame sequence.\nfunc (t *http2Client) reader() {\n\t// Check the validity of server preface.\n\tframe, err := t.framer.readFrame()\n\tif err != nil {\n\t\tt.notifyError(err)\n\t\treturn\n\t}\n\tsf, ok := frame.(*http2.SettingsFrame)\n\tif !ok {\n\t\tt.notifyError(err)\n\t\treturn\n\t}\n\tt.handleSettings(sf)\n\n\thDec := newHPACKDecoder()\n\tvar curStream *Stream\n\t// loop to keep reading incoming messages on this transport.\n\tfor {\n\t\tframe, err := t.framer.readFrame()\n\t\tif err != nil {\n\t\t\tt.notifyError(err)\n\t\t\treturn\n\t\t}\n\t\tswitch frame := frame.(type) {\n\t\tcase *http2.HeadersFrame:\n\t\t\t// operateHeaders has to be invoked regardless the value of curStream\n\t\t\t// because the HPACK decoder needs to be updated using the received\n\t\t\t// headers.\n\t\t\tcurStream, _ = t.getStream(frame)\n\t\t\tendStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream)\n\t\t\tcurStream = t.operateHeaders(hDec, curStream, frame, endStream)\n\t\tcase *http2.ContinuationFrame:\n\t\t\tcurStream = t.operateHeaders(hDec, curStream, frame, false)\n\t\tcase 
*http2.DataFrame:\n\t\t\tt.handleData(frame)\n\t\tcase *http2.RSTStreamFrame:\n\t\t\tt.handleRSTStream(frame)\n\t\tcase *http2.SettingsFrame:\n\t\t\tt.handleSettings(frame)\n\t\tcase *http2.PingFrame:\n\t\t\tt.handlePing(frame)\n\t\tcase *http2.GoAwayFrame:\n\t\t\tt.handleGoAway(frame)\n\t\tcase *http2.WindowUpdateFrame:\n\t\t\tt.handleWindowUpdate(frame)\n\t\tdefault:\n\t\t\tgrpclog.Printf(\"transport: http2Client.reader got unhandled frame type %v.\", frame)\n\t\t}\n\t}\n}\n\n// controller running in a separate goroutine takes charge of sending control\n// frames (e.g., window update, reset stream, setting, etc.) to the server.\nfunc (t *http2Client) controller() {\n\tfor {\n\t\tselect {\n\t\tcase i := <-t.controlBuf.get():\n\t\t\tt.controlBuf.load()\n\t\t\tselect {\n\t\t\tcase <-t.writableChan:\n\t\t\t\tswitch i := i.(type) {\n\t\t\t\tcase *windowUpdate:\n\t\t\t\t\tt.framer.writeWindowUpdate(true, i.streamID, i.increment)\n\t\t\t\tcase *settings:\n\t\t\t\t\tif i.ack {\n\t\t\t\t\t\tt.framer.writeSettingsAck(true)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.framer.writeSettings(true, i.setting...)\n\t\t\t\t\t}\n\t\t\t\tcase *resetStream:\n\t\t\t\t\tt.framer.writeRSTStream(true, i.streamID, i.code)\n\t\t\t\tcase *flushIO:\n\t\t\t\t\tt.framer.flushWrite()\n\t\t\t\tcase *ping:\n\t\t\t\t\t// TODO(zhaoq): Ack with all-0 data now. 
will change to some\n\t\t\t\t\t// meaningful content when this is actually in use.\n\t\t\t\t\tt.framer.writePing(true, i.ack, [8]byte{})\n\t\t\t\tdefault:\n\t\t\t\t\tgrpclog.Printf(\"transport: http2Client.controller got unexpected item type %v\\n\", i)\n\t\t\t\t}\n\t\t\t\tt.writableChan <- 0\n\t\t\t\tcontinue\n\t\t\tcase <-t.shutdownChan:\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-t.shutdownChan:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (t *http2Client) Error() <-chan struct{} {\n\treturn t.errorChan\n}\n\nfunc (t *http2Client) notifyError(err error) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\t// make sure t.errorChan is closed only once.\n\tif t.state == reachable {\n\t\tt.state = unreachable\n\t\tclose(t.errorChan)\n\t\tgrpclog.Printf(\"transport: http2Client.notifyError got notified that the client transport was broken %v.\", err)\n\t}\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/transport/http2_server.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\npackage transport\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"math\"\n\t\"net\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"github.com/bradfitz/http2\"\n\t\"github.com/bradfitz/http2/hpack\"\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/grpclog\"\n\t\"google.golang.org/grpc/metadata\"\n)\n\n// ErrIllegalHeaderWrite indicates that setting header is illegal because of\n// the stream's state.\nvar ErrIllegalHeaderWrite = errors.New(\"transport: the stream is done or WriteHeader was already called\")\n\n// http2Server implements the ServerTransport interface with HTTP2.\ntype http2Server struct {\n\tconn        net.Conn\n\tmaxStreamID uint32 // max stream ID ever seen\n\t// writableChan synchronizes write access to the transport.\n\t// A writer acquires the write lock by sending a value on writableChan\n\t// and releases it by receiving from writableChan.\n\twritableChan chan int\n\t// shutdownChan is closed when Close is called.\n\t// Blocking operations should select on shutdownChan to avoid\n\t// blocking forever after Close.\n\tshutdownChan chan struct{}\n\tframer       *framer\n\thBuf         *bytes.Buffer  // the buffer for HPACK encoding\n\thEnc         *hpack.Encoder // HPACK encoder\n\n\t// The max number of concurrent streams.\n\tmaxStreams uint32\n\t// controlBuf delivers all the control related tasks (e.g., window\n\t// updates, reset streams, and various settings) 
to the controller.\n\tcontrolBuf *recvBuffer\n\tfc         *inFlow\n\t// sendQuotaPool provides flow control to outbound message.\n\tsendQuotaPool *quotaPool\n\n\tmu            sync.Mutex // guard the following\n\tstate         transportState\n\tactiveStreams map[uint32]*Stream\n\t// the per-stream outbound flow control window size set by the peer.\n\tstreamSendQuota uint32\n}\n\n// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is\n// returned if something goes wrong.\nfunc newHTTP2Server(conn net.Conn, maxStreams uint32) (_ ServerTransport, err error) {\n\tframer := newFramer(conn)\n\t// Send initial settings as connection preface to client.\n\t// TODO(zhaoq): Have a better way to signal \"no limit\" because 0 is\n\t// permitted in the HTTP2 spec.\n\tvar settings []http2.Setting\n\t// TODO(zhaoq): Have a better way to signal \"no limit\" because 0 is\n\t// permitted in the HTTP2 spec.\n\tif maxStreams == 0 {\n\t\tmaxStreams = math.MaxUint32\n\t} else {\n\t\tsettings = append(settings, http2.Setting{http2.SettingMaxConcurrentStreams, maxStreams})\n\t}\n\tif initialWindowSize != defaultWindowSize {\n\t\tsettings = append(settings, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)})\n\t}\n\tif err := framer.writeSettings(true, settings...); err != nil {\n\t\treturn nil, ConnectionErrorf(\"transport: %v\", err)\n\t}\n\t// Adjust the connection flow control window if needed.\n\tif delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {\n\t\tif err := framer.writeWindowUpdate(true, 0, delta); err != nil {\n\t\t\treturn nil, ConnectionErrorf(\"transport: %v\", err)\n\t\t}\n\t}\n\tvar buf bytes.Buffer\n\tt := &http2Server{\n\t\tconn:            conn,\n\t\tframer:          framer,\n\t\thBuf:            &buf,\n\t\thEnc:            hpack.NewEncoder(&buf),\n\t\tmaxStreams:      maxStreams,\n\t\tcontrolBuf:      newRecvBuffer(),\n\t\tfc:              &inFlow{limit: initialConnWindowSize},\n\t\tsendQuotaPool:   
newQuotaPool(defaultWindowSize),\n\t\tstate:           reachable,\n\t\twritableChan:    make(chan int, 1),\n\t\tshutdownChan:    make(chan struct{}),\n\t\tactiveStreams:   make(map[uint32]*Stream),\n\t\tstreamSendQuota: defaultWindowSize,\n\t}\n\tgo t.controller()\n\tt.writableChan <- 0\n\treturn t, nil\n}\n\n// operateHeader takes action on the decoded headers. It returns the current\n// stream if there are remaining headers on the wire (in the following\n// Continuation frame).\nfunc (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream), wg *sync.WaitGroup) (pendingStream *Stream) {\n\tdefer func() {\n\t\tif pendingStream == nil {\n\t\t\thDec.state = decodeState{}\n\t\t}\n\t}()\n\tendHeaders, err := hDec.decodeServerHTTP2Headers(frame)\n\tif s == nil {\n\t\t// s has been closed.\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\tgrpclog.Printf(\"transport: http2Server.operateHeader found %v\", err)\n\t\tif se, ok := err.(StreamError); ok {\n\t\t\tt.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})\n\t\t}\n\t\treturn nil\n\t}\n\tif endStream {\n\t\t// s is just created by the caller. No lock needed.\n\t\ts.state = streamReadDone\n\t}\n\tif !endHeaders {\n\t\treturn s\n\t}\n\tt.mu.Lock()\n\tif t.state != reachable {\n\t\tt.mu.Unlock()\n\t\treturn nil\n\t}\n\tif uint32(len(t.activeStreams)) >= t.maxStreams {\n\t\tt.mu.Unlock()\n\t\tt.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})\n\t\treturn nil\n\t}\n\ts.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))\n\tt.activeStreams[s.id] = s\n\tt.mu.Unlock()\n\ts.windowHandler = func(n int) {\n\t\tt.updateWindow(s, uint32(n))\n\t}\n\tif hDec.state.timeoutSet {\n\t\ts.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout)\n\t} else {\n\t\ts.ctx, s.cancel = context.WithCancel(context.TODO())\n\t}\n\t// Cache the current stream to the context so that the server application\n\t// can find out. 
Required when the server wants to send some metadata\n\t// back to the client (unary call only).\n\ts.ctx = newContextWithStream(s.ctx, s)\n\t// Attach the received metadata to the context.\n\tif len(hDec.state.mdata) > 0 {\n\t\ts.ctx = metadata.NewContext(s.ctx, hDec.state.mdata)\n\t}\n\n\ts.dec = &recvBufferReader{\n\t\tctx:  s.ctx,\n\t\trecv: s.buf,\n\t}\n\ts.method = hDec.state.method\n\n\twg.Add(1)\n\tgo func() {\n\t\thandle(s)\n\t\twg.Done()\n\t}()\n\treturn nil\n}\n\n// HandleStreams receives incoming streams using the given handler. This is\n// typically run in a separate goroutine.\nfunc (t *http2Server) HandleStreams(handle func(*Stream)) {\n\t// Check the validity of client preface.\n\tpreface := make([]byte, len(clientPreface))\n\tif _, err := io.ReadFull(t.conn, preface); err != nil {\n\t\tgrpclog.Printf(\"transport: http2Server.HandleStreams failed to receive the preface from client: %v\", err)\n\t\tt.Close()\n\t\treturn\n\t}\n\tif !bytes.Equal(preface, clientPreface) {\n\t\tgrpclog.Printf(\"transport: http2Server.HandleStreams received bogus greeting from client: %q\", preface)\n\t\tt.Close()\n\t\treturn\n\t}\n\n\tframe, err := t.framer.readFrame()\n\tif err != nil {\n\t\tgrpclog.Printf(\"transport: http2Server.HandleStreams failed to read frame: %v\", err)\n\t\tt.Close()\n\t\treturn\n\t}\n\tsf, ok := frame.(*http2.SettingsFrame)\n\tif !ok {\n\t\tgrpclog.Printf(\"transport: http2Server.HandleStreams saw invalid preface type %T from client\", frame)\n\t\tt.Close()\n\t\treturn\n\t}\n\tt.handleSettings(sf)\n\n\thDec := newHPACKDecoder()\n\tvar curStream *Stream\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\tfor {\n\t\tframe, err := t.framer.readFrame()\n\t\tif err != nil {\n\t\t\tt.Close()\n\t\t\treturn\n\t\t}\n\t\tswitch frame := frame.(type) {\n\t\tcase *http2.HeadersFrame:\n\t\t\tid := frame.Header().StreamID\n\t\t\tif id%2 != 1 || id <= t.maxStreamID {\n\t\t\t\t// illegal gRPC stream id.\n\t\t\t\tgrpclog.Println(\"transport: 
http2Server.HandleStreams received an illegal stream id: \", id)\n\t\t\t\tt.Close()\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.maxStreamID = id\n\t\t\tbuf := newRecvBuffer()\n\t\t\tfc := &inFlow{\n\t\t\t\tlimit: initialWindowSize,\n\t\t\t\tconn:  t.fc,\n\t\t\t}\n\t\t\tcurStream = &Stream{\n\t\t\t\tid:  frame.Header().StreamID,\n\t\t\t\tst:  t,\n\t\t\t\tbuf: buf,\n\t\t\t\tfc:  fc,\n\t\t\t}\n\t\t\tendStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream)\n\t\t\tcurStream = t.operateHeaders(hDec, curStream, frame, endStream, handle, &wg)\n\t\tcase *http2.ContinuationFrame:\n\t\t\tcurStream = t.operateHeaders(hDec, curStream, frame, false, handle, &wg)\n\t\tcase *http2.DataFrame:\n\t\t\tt.handleData(frame)\n\t\tcase *http2.RSTStreamFrame:\n\t\t\tt.handleRSTStream(frame)\n\t\tcase *http2.SettingsFrame:\n\t\t\tt.handleSettings(frame)\n\t\tcase *http2.PingFrame:\n\t\t\tt.handlePing(frame)\n\t\tcase *http2.WindowUpdateFrame:\n\t\t\tt.handleWindowUpdate(frame)\n\t\tcase *http2.GoAwayFrame:\n\t\t\tbreak\n\t\tdefault:\n\t\t\tgrpclog.Printf(\"transport: http2Server.HandleStreams found unhandled frame type %v.\", frame)\n\t\t}\n\t}\n}\n\nfunc (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tif t.activeStreams == nil {\n\t\t// The transport is closing.\n\t\treturn nil, false\n\t}\n\ts, ok := t.activeStreams[f.Header().StreamID]\n\tif !ok {\n\t\t// The stream is already done.\n\t\treturn nil, false\n\t}\n\treturn s, true\n}\n\n// updateWindow adjusts the inbound quota for the stream and the transport.\n// Window updates will deliver to the controller for sending when\n// the cumulative quota exceeds the corresponding threshold.\nfunc (t *http2Server) updateWindow(s *Stream, n uint32) {\n\tswu, cwu := s.fc.onRead(n)\n\tif swu > 0 {\n\t\tt.controlBuf.put(&windowUpdate{s.id, swu})\n\t}\n\tif cwu > 0 {\n\t\tt.controlBuf.put(&windowUpdate{0, cwu})\n\t}\n}\n\nfunc (t *http2Server) handleData(f *http2.DataFrame) {\n\t// Select the right 
stream to dispatch.\n\ts, ok := t.getStream(f)\n\tif !ok {\n\t\treturn\n\t}\n\tsize := len(f.Data())\n\tif err := s.fc.onData(uint32(size)); err != nil {\n\t\tif _, ok := err.(ConnectionError); ok {\n\t\t\tgrpclog.Printf(\"transport: http2Server %v\", err)\n\t\t\tt.Close()\n\t\t\treturn\n\t\t}\n\t\tt.closeStream(s)\n\t\tt.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})\n\t\treturn\n\t}\n\t// TODO(bradfitz, zhaoq): A copy is required here because there is no\n\t// guarantee f.Data() is consumed before the arrival of next frame.\n\t// Can this copy be eliminated?\n\tdata := make([]byte, size)\n\tcopy(data, f.Data())\n\ts.write(recvMsg{data: data})\n\tif f.Header().Flags.Has(http2.FlagDataEndStream) {\n\t\t// Received the end of stream from the client.\n\t\ts.mu.Lock()\n\t\tif s.state != streamDone {\n\t\t\tif s.state == streamWriteDone {\n\t\t\t\ts.state = streamDone\n\t\t\t} else {\n\t\t\t\ts.state = streamReadDone\n\t\t\t}\n\t\t}\n\t\ts.mu.Unlock()\n\t\ts.write(recvMsg{err: io.EOF})\n\t}\n}\n\nfunc (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {\n\ts, ok := t.getStream(f)\n\tif !ok {\n\t\treturn\n\t}\n\tt.closeStream(s)\n}\n\nfunc (t *http2Server) handleSettings(f *http2.SettingsFrame) {\n\tif f.IsAck() {\n\t\treturn\n\t}\n\tf.ForeachSetting(func(s http2.Setting) error {\n\t\tif v, ok := f.Value(http2.SettingInitialWindowSize); ok {\n\t\t\tt.mu.Lock()\n\t\t\tdefer t.mu.Unlock()\n\t\t\tfor _, s := range t.activeStreams {\n\t\t\t\ts.sendQuotaPool.reset(int(v - t.streamSendQuota))\n\t\t\t}\n\t\t\tt.streamSendQuota = v\n\t\t}\n\t\treturn nil\n\t})\n\tt.controlBuf.put(&settings{ack: true})\n}\n\nfunc (t *http2Server) handlePing(f *http2.PingFrame) {\n\tt.controlBuf.put(&ping{true})\n}\n\nfunc (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {\n\tid := f.Header().StreamID\n\tincr := f.Increment\n\tif id == 0 {\n\t\tt.sendQuotaPool.add(int(incr))\n\t\treturn\n\t}\n\tif s, ok := t.getStream(f); ok 
{\n\t\ts.sendQuotaPool.add(int(incr))\n\t}\n}\n\nfunc (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error {\n\tfirst := true\n\tendHeaders := false\n\tvar err error\n\t// Sends the headers in a single batch.\n\tfor !endHeaders {\n\t\tsize := t.hBuf.Len()\n\t\tif size > http2MaxFrameLen {\n\t\t\tsize = http2MaxFrameLen\n\t\t} else {\n\t\t\tendHeaders = true\n\t\t}\n\t\tif first {\n\t\t\tp := http2.HeadersFrameParam{\n\t\t\t\tStreamID:      s.id,\n\t\t\t\tBlockFragment: b.Next(size),\n\t\t\t\tEndStream:     endStream,\n\t\t\t\tEndHeaders:    endHeaders,\n\t\t\t}\n\t\t\terr = t.framer.writeHeaders(endHeaders, p)\n\t\t\tfirst = false\n\t\t} else {\n\t\t\terr = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size))\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Close()\n\t\t\treturn ConnectionErrorf(\"transport: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\n// WriteHeader sends the header metedata md back to the client.\nfunc (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {\n\ts.mu.Lock()\n\tif s.headerOk || s.state == streamDone {\n\t\ts.mu.Unlock()\n\t\treturn ErrIllegalHeaderWrite\n\t}\n\ts.headerOk = true\n\ts.mu.Unlock()\n\tif _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {\n\t\treturn err\n\t}\n\tt.hBuf.Reset()\n\tt.hEnc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\tt.hEnc.WriteField(hpack.HeaderField{Name: \"content-type\", Value: \"application/grpc\"})\n\tfor k, v := range md {\n\t\tt.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})\n\t}\n\tif err := t.writeHeaders(s, t.hBuf, false); err != nil {\n\t\treturn err\n\t}\n\tt.writableChan <- 0\n\treturn nil\n}\n\n// WriteStatus sends stream status to the client and terminates the stream.\n// There is no further I/O operations being able to perform on this stream.\n// TODO(zhaoq): Now it indicates the end of entire stream. 
Revisit if early\n// OK is adopted.\nfunc (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {\n\ts.mu.RLock()\n\tif s.state == streamDone {\n\t\ts.mu.RUnlock()\n\t\treturn nil\n\t}\n\ts.mu.RUnlock()\n\tif _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {\n\t\treturn err\n\t}\n\tt.hBuf.Reset()\n\tt.hEnc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\tt.hEnc.WriteField(\n\t\thpack.HeaderField{\n\t\t\tName:  \"grpc-status\",\n\t\t\tValue: strconv.Itoa(int(statusCode)),\n\t\t})\n\tt.hEnc.WriteField(hpack.HeaderField{Name: \"grpc-message\", Value: statusDesc})\n\t// Attach the trailer metadata.\n\tfor k, v := range s.trailer {\n\t\tt.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})\n\t}\n\tif err := t.writeHeaders(s, t.hBuf, true); err != nil {\n\t\tt.Close()\n\t\treturn err\n\t}\n\tt.closeStream(s)\n\tt.writableChan <- 0\n\treturn nil\n}\n\n// Write converts the data into HTTP2 data frame and sends it out. Non-nil error\n// is returns if it fails (e.g., framing error, transport error).\nfunc (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {\n\t// TODO(zhaoq): Support multi-writers for a single stream.\n\tvar writeHeaderFrame bool\n\ts.mu.Lock()\n\tif !s.headerOk {\n\t\twriteHeaderFrame = true\n\t\ts.headerOk = true\n\t}\n\ts.mu.Unlock()\n\tif writeHeaderFrame {\n\t\tif _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tt.hBuf.Reset()\n\t\tt.hEnc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t\tt.hEnc.WriteField(hpack.HeaderField{Name: \"content-type\", Value: \"application/grpc\"})\n\t\tp := http2.HeadersFrameParam{\n\t\t\tStreamID:      s.id,\n\t\t\tBlockFragment: t.hBuf.Bytes(),\n\t\t\tEndHeaders:    true,\n\t\t}\n\t\tif err := t.framer.writeHeaders(false, p); err != nil {\n\t\t\tt.Close()\n\t\t\treturn ConnectionErrorf(\"transport: %v\", err)\n\t\t}\n\t\tt.writableChan <- 0\n\t}\n\tr := 
bytes.NewBuffer(data)\n\tfor {\n\t\tif r.Len() == 0 {\n\t\t\treturn nil\n\t\t}\n\t\tsize := http2MaxFrameLen\n\t\ts.sendQuotaPool.add(0)\n\t\t// Wait until the stream has some quota to send the data.\n\t\tsq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tt.sendQuotaPool.add(0)\n\t\t// Wait until the transport has some quota to send the data.\n\t\ttq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire())\n\t\tif err != nil {\n\t\t\tif _, ok := err.(StreamError); ok {\n\t\t\t\tt.sendQuotaPool.cancel()\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tif sq < size {\n\t\t\tsize = sq\n\t\t}\n\t\tif tq < size {\n\t\t\tsize = tq\n\t\t}\n\t\tp := r.Next(size)\n\t\tps := len(p)\n\t\tif ps < sq {\n\t\t\t// Overbooked stream quota. Return it back.\n\t\t\ts.sendQuotaPool.add(sq - ps)\n\t\t}\n\t\tif ps < tq {\n\t\t\t// Overbooked transport quota. Return it back.\n\t\t\tt.sendQuotaPool.add(tq - ps)\n\t\t}\n\t\tt.framer.adjustNumWriters(1)\n\t\t// Got some quota. Try to acquire writing privilege on the\n\t\t// transport.\n\t\tif _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {\n\t\t\tif t.framer.adjustNumWriters(-1) == 0 {\n\t\t\t\t// This writer is the last one in this batch and has the\n\t\t\t\t// responsibility to flush the buffered frames. 
It queues\n\t\t\t\t// a flush request to controlBuf instead of flushing directly\n\t\t\t\t// in order to avoid the race with other writing or flushing.\n\t\t\t\tt.controlBuf.put(&flushIO{})\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tvar forceFlush bool\n\t\tif r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last {\n\t\t\tforceFlush = true\n\t\t}\n\t\tif err := t.framer.writeData(forceFlush, s.id, false, p); err != nil {\n\t\t\tt.Close()\n\t\t\treturn ConnectionErrorf(\"transport: %v\", err)\n\t\t}\n\t\tif t.framer.adjustNumWriters(-1) == 0 {\n\t\t\tt.framer.flushWrite()\n\t\t}\n\t\tt.writableChan <- 0\n\t}\n\n}\n\n// controller running in a separate goroutine takes charge of sending control\n// frames (e.g., window update, reset stream, setting, etc.) to the server.\nfunc (t *http2Server) controller() {\n\tfor {\n\t\tselect {\n\t\tcase i := <-t.controlBuf.get():\n\t\t\tt.controlBuf.load()\n\t\t\tselect {\n\t\t\tcase <-t.writableChan:\n\t\t\t\tswitch i := i.(type) {\n\t\t\t\tcase *windowUpdate:\n\t\t\t\t\tt.framer.writeWindowUpdate(true, i.streamID, i.increment)\n\t\t\t\tcase *settings:\n\t\t\t\t\tif i.ack {\n\t\t\t\t\t\tt.framer.writeSettingsAck(true)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.framer.writeSettings(true, i.setting...)\n\t\t\t\t\t}\n\t\t\t\tcase *resetStream:\n\t\t\t\t\tt.framer.writeRSTStream(true, i.streamID, i.code)\n\t\t\t\tcase *flushIO:\n\t\t\t\t\tt.framer.flushWrite()\n\t\t\t\tcase *ping:\n\t\t\t\t\t// TODO(zhaoq): Ack with all-0 data now. 
will change to some\n\t\t\t\t\t// meaningful content when this is actually in use.\n\t\t\t\t\tt.framer.writePing(true, i.ack, [8]byte{})\n\t\t\t\tdefault:\n\t\t\t\t\tgrpclog.Printf(\"transport: http2Server.controller got unexpected item type %v\\n\", i)\n\t\t\t\t}\n\t\t\t\tt.writableChan <- 0\n\t\t\t\tcontinue\n\t\t\tcase <-t.shutdownChan:\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-t.shutdownChan:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// Close starts shutting down the http2Server transport.\n// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This\n// could cause some resource issue. Revisit this later.\nfunc (t *http2Server) Close() (err error) {\n\tt.mu.Lock()\n\tif t.state == closing {\n\t\tt.mu.Unlock()\n\t\treturn errors.New(\"transport: Close() was already called\")\n\t}\n\tt.state = closing\n\tstreams := t.activeStreams\n\tt.activeStreams = nil\n\tt.mu.Unlock()\n\tclose(t.shutdownChan)\n\terr = t.conn.Close()\n\t// Notify all active streams.\n\tfor _, s := range streams {\n\t\ts.write(recvMsg{err: ErrConnClosing})\n\t}\n\treturn\n}\n\n// closeStream clears the footprint of a stream when the stream is not needed\n// any more.\nfunc (t *http2Server) closeStream(s *Stream) {\n\tt.mu.Lock()\n\tdelete(t.activeStreams, s.id)\n\tt.mu.Unlock()\n\tif q := s.fc.restoreConn(); q > 0 {\n\t\tt.controlBuf.put(&windowUpdate{0, q})\n\t}\n\ts.mu.Lock()\n\tif s.state == streamDone {\n\t\ts.mu.Unlock()\n\t\treturn\n\t}\n\ts.state = streamDone\n\ts.mu.Unlock()\n\t// In case stream sending and receiving are invoked in separate\n\t// goroutines (e.g., bi-directional streaming), the caller needs\n\t// to call cancel on the stream to interrupt the blocking on\n\t// other goroutines.\n\ts.cancel()\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/transport/http_util.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\npackage transport\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"strconv\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/bradfitz/http2\"\n\t\"github.com/bradfitz/http2/hpack\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/grpclog\"\n\t\"google.golang.org/grpc/metadata\"\n)\n\nconst (\n\t// http2MaxFrameLen specifies the max length of a HTTP2 frame.\n\thttp2MaxFrameLen = 16384 // 16KB frame\n\t// http://http2.github.io/http2-spec/#SettingValues\n\thttp2InitHeaderTableSize = 4096\n\t// http2IOBufSize specifies the buffer size for sending frames.\n\thttp2IOBufSize = 32 * 1024\n)\n\nvar (\n\tclientPreface = []byte(http2.ClientPreface)\n)\n\nvar http2RSTErrConvTab = map[http2.ErrCode]codes.Code{\n\thttp2.ErrCodeNo:                 codes.Internal,\n\thttp2.ErrCodeProtocol:           codes.Internal,\n\thttp2.ErrCodeInternal:           codes.Internal,\n\thttp2.ErrCodeFlowControl:        codes.Internal,\n\thttp2.ErrCodeSettingsTimeout:    codes.Internal,\n\thttp2.ErrCodeFrameSize:          codes.Internal,\n\thttp2.ErrCodeRefusedStream:      codes.Unavailable,\n\thttp2.ErrCodeCancel:             codes.Canceled,\n\thttp2.ErrCodeCompression:        codes.Internal,\n\thttp2.ErrCodeConnect:            codes.Internal,\n\thttp2.ErrCodeEnhanceYourCalm:    codes.ResourceExhausted,\n\thttp2.ErrCodeInadequateSecurity: codes.PermissionDenied,\n}\n\nvar statusCodeConvTab = 
map[codes.Code]http2.ErrCode{\n\tcodes.Internal:          http2.ErrCodeInternal, // pick an arbitrary one which is matched.\n\tcodes.Canceled:          http2.ErrCodeCancel,\n\tcodes.Unavailable:       http2.ErrCodeRefusedStream,\n\tcodes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,\n\tcodes.PermissionDenied:  http2.ErrCodeInadequateSecurity,\n}\n\n// Records the states during HPACK decoding. Must be reset once the\n// decoding of the entire headers are finished.\ntype decodeState struct {\n\t// statusCode caches the stream status received from the trailer\n\t// the server sent. Client side only.\n\tstatusCode codes.Code\n\tstatusDesc string\n\t// Server side only fields.\n\ttimeoutSet bool\n\ttimeout    time.Duration\n\tmethod     string\n\t// key-value metadata map from the peer.\n\tmdata map[string]string\n}\n\n// An hpackDecoder decodes HTTP2 headers which may span multiple frames.\ntype hpackDecoder struct {\n\th     *hpack.Decoder\n\tstate decodeState\n\terr   error // The err when decoding\n}\n\n// A headerFrame is either a http2.HeaderFrame or http2.ContinuationFrame.\ntype headerFrame interface {\n\tHeader() http2.FrameHeader\n\tHeaderBlockFragment() []byte\n\tHeadersEnded() bool\n}\n\n// isReservedHeader checks whether hdr belongs to HTTP2 headers\n// reserved by gRPC protocol. 
Any other headers are classified as the\n// user-specified metadata.\nfunc isReservedHeader(hdr string) bool {\n\tif hdr[0] == ':' {\n\t\treturn true\n\t}\n\tswitch hdr {\n\tcase \"content-type\",\n\t\t\"grpc-message-type\",\n\t\t\"grpc-encoding\",\n\t\t\"grpc-message\",\n\t\t\"grpc-status\",\n\t\t\"grpc-timeout\",\n\t\t\"te\",\n\t\t\"user-agent\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc newHPACKDecoder() *hpackDecoder {\n\td := &hpackDecoder{}\n\td.h = hpack.NewDecoder(http2InitHeaderTableSize, func(f hpack.HeaderField) {\n\t\tswitch f.Name {\n\t\tcase \"grpc-status\":\n\t\t\tcode, err := strconv.Atoi(f.Value)\n\t\t\tif err != nil {\n\t\t\t\td.err = StreamErrorf(codes.Internal, \"transport: malformed grpc-status: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\td.state.statusCode = codes.Code(code)\n\t\tcase \"grpc-message\":\n\t\t\td.state.statusDesc = f.Value\n\t\tcase \"grpc-timeout\":\n\t\t\td.state.timeoutSet = true\n\t\t\tvar err error\n\t\t\td.state.timeout, err = timeoutDecode(f.Value)\n\t\t\tif err != nil {\n\t\t\t\td.err = StreamErrorf(codes.Internal, \"transport: malformed time-out: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase \":path\":\n\t\t\td.state.method = f.Value\n\t\tdefault:\n\t\t\tif !isReservedHeader(f.Name) {\n\t\t\t\tif d.state.mdata == nil {\n\t\t\t\t\td.state.mdata = make(map[string]string)\n\t\t\t\t}\n\t\t\t\tk, v, err := metadata.DecodeKeyValue(f.Name, f.Value)\n\t\t\t\tif err != nil {\n\t\t\t\t\tgrpclog.Printf(\"Failed to decode (%q, %q): %v\", f.Name, f.Value, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\td.state.mdata[k] = v\n\t\t\t}\n\t\t}\n\t})\n\treturn d\n}\n\nfunc (d *hpackDecoder) decodeClientHTTP2Headers(frame headerFrame) (endHeaders bool, err error) {\n\td.err = nil\n\t_, err = d.h.Write(frame.HeaderBlockFragment())\n\tif err != nil {\n\t\terr = StreamErrorf(codes.Internal, \"transport: HPACK header decode error: %v\", err)\n\t}\n\n\tif frame.HeadersEnded() {\n\t\tif closeErr := d.h.Close(); closeErr != nil && 
err == nil {\n\t\t\terr = StreamErrorf(codes.Internal, \"transport: HPACK decoder close error: %v\", closeErr)\n\t\t}\n\t\tendHeaders = true\n\t}\n\n\tif err == nil && d.err != nil {\n\t\terr = d.err\n\t}\n\treturn\n}\n\nfunc (d *hpackDecoder) decodeServerHTTP2Headers(frame headerFrame) (endHeaders bool, err error) {\n\td.err = nil\n\t_, err = d.h.Write(frame.HeaderBlockFragment())\n\tif err != nil {\n\t\terr = StreamErrorf(codes.Internal, \"transport: HPACK header decode error: %v\", err)\n\t}\n\n\tif frame.HeadersEnded() {\n\t\tif closeErr := d.h.Close(); closeErr != nil && err == nil {\n\t\t\terr = StreamErrorf(codes.Internal, \"transport: HPACK decoder close error: %v\", closeErr)\n\t\t}\n\t\tendHeaders = true\n\t}\n\n\tif err == nil && d.err != nil {\n\t\terr = d.err\n\t}\n\treturn\n}\n\ntype timeoutUnit uint8\n\nconst (\n\thour        timeoutUnit = 'H'\n\tminute      timeoutUnit = 'M'\n\tsecond      timeoutUnit = 'S'\n\tmillisecond timeoutUnit = 'm'\n\tmicrosecond timeoutUnit = 'u'\n\tnanosecond  timeoutUnit = 'n'\n)\n\nfunc timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {\n\tswitch u {\n\tcase hour:\n\t\treturn time.Hour, true\n\tcase minute:\n\t\treturn time.Minute, true\n\tcase second:\n\t\treturn time.Second, true\n\tcase millisecond:\n\t\treturn time.Millisecond, true\n\tcase microsecond:\n\t\treturn time.Microsecond, true\n\tcase nanosecond:\n\t\treturn time.Nanosecond, true\n\tdefault:\n\t}\n\treturn\n}\n\nconst maxTimeoutValue int64 = 100000000 - 1\n\n// div does integer division and round-up the result. Note that this is\n// equivalent to (d+r-1)/r but has less chance to overflow.\nfunc div(d, r time.Duration) int64 {\n\tif m := d % r; m > 0 {\n\t\treturn int64(d/r + 1)\n\t}\n\treturn int64(d / r)\n}\n\n// TODO(zhaoq): It is the simplistic and not bandwidth efficient. 
Improve it.\nfunc timeoutEncode(t time.Duration) string {\n\tif d := div(t, time.Nanosecond); d <= maxTimeoutValue {\n\t\treturn strconv.FormatInt(d, 10) + \"n\"\n\t}\n\tif d := div(t, time.Microsecond); d <= maxTimeoutValue {\n\t\treturn strconv.FormatInt(d, 10) + \"u\"\n\t}\n\tif d := div(t, time.Millisecond); d <= maxTimeoutValue {\n\t\treturn strconv.FormatInt(d, 10) + \"m\"\n\t}\n\tif d := div(t, time.Second); d <= maxTimeoutValue {\n\t\treturn strconv.FormatInt(d, 10) + \"S\"\n\t}\n\tif d := div(t, time.Minute); d <= maxTimeoutValue {\n\t\treturn strconv.FormatInt(d, 10) + \"M\"\n\t}\n\t// Note that maxTimeoutValue * time.Hour > MaxInt64.\n\treturn strconv.FormatInt(div(t, time.Hour), 10) + \"H\"\n}\n\nfunc timeoutDecode(s string) (time.Duration, error) {\n\tsize := len(s)\n\tif size < 2 {\n\t\treturn 0, fmt.Errorf(\"transport: timeout string is too short: %q\", s)\n\t}\n\tunit := timeoutUnit(s[size-1])\n\td, ok := timeoutUnitToDuration(unit)\n\tif !ok {\n\t\treturn 0, fmt.Errorf(\"transport: timeout unit is not recognized: %q\", s)\n\t}\n\tt, err := strconv.ParseInt(s[:size-1], 10, 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn d * time.Duration(t), nil\n}\n\ntype framer struct {\n\tnumWriters int32\n\treader     io.Reader\n\twriter     *bufio.Writer\n\tfr         *http2.Framer\n}\n\nfunc newFramer(conn net.Conn) *framer {\n\tf := &framer{\n\t\treader: conn,\n\t\twriter: bufio.NewWriterSize(conn, http2IOBufSize),\n\t}\n\tf.fr = http2.NewFramer(f.writer, f.reader)\n\treturn f\n}\n\nfunc (f *framer) adjustNumWriters(i int32) int32 {\n\treturn atomic.AddInt32(&f.numWriters, i)\n}\n\n// The following writeXXX functions can only be called when the caller gets\n// unblocked from writableChan channel (i.e., owns the privilege to write).\n\nfunc (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error {\n\tif err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil 
{\n\t\treturn err\n\t}\n\tif forceFlush {\n\t\treturn f.writer.Flush()\n\t}\n\treturn nil\n}\n\nfunc (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error {\n\tif err := f.fr.WriteData(streamID, endStream, data); err != nil {\n\t\treturn err\n\t}\n\tif forceFlush {\n\t\treturn f.writer.Flush()\n\t}\n\treturn nil\n}\n\nfunc (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error {\n\tif err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil {\n\t\treturn err\n\t}\n\tif forceFlush {\n\t\treturn f.writer.Flush()\n\t}\n\treturn nil\n}\n\nfunc (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error {\n\tif err := f.fr.WriteHeaders(p); err != nil {\n\t\treturn err\n\t}\n\tif forceFlush {\n\t\treturn f.writer.Flush()\n\t}\n\treturn nil\n}\n\nfunc (f *framer) writePing(forceFlush, ack bool, data [8]byte) error {\n\tif err := f.fr.WritePing(ack, data); err != nil {\n\t\treturn err\n\t}\n\tif forceFlush {\n\t\treturn f.writer.Flush()\n\t}\n\treturn nil\n}\n\nfunc (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error {\n\tif err := f.fr.WritePriority(streamID, p); err != nil {\n\t\treturn err\n\t}\n\tif forceFlush {\n\t\treturn f.writer.Flush()\n\t}\n\treturn nil\n}\n\nfunc (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error {\n\tif err := f.fr.WritePushPromise(p); err != nil {\n\t\treturn err\n\t}\n\tif forceFlush {\n\t\treturn f.writer.Flush()\n\t}\n\treturn nil\n}\n\nfunc (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error {\n\tif err := f.fr.WriteRSTStream(streamID, code); err != nil {\n\t\treturn err\n\t}\n\tif forceFlush {\n\t\treturn f.writer.Flush()\n\t}\n\treturn nil\n}\n\nfunc (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error {\n\tif err := f.fr.WriteSettings(settings...); err != nil {\n\t\treturn err\n\t}\n\tif forceFlush 
{\n\t\treturn f.writer.Flush()\n\t}\n\treturn nil\n}\n\nfunc (f *framer) writeSettingsAck(forceFlush bool) error {\n\tif err := f.fr.WriteSettingsAck(); err != nil {\n\t\treturn err\n\t}\n\tif forceFlush {\n\t\treturn f.writer.Flush()\n\t}\n\treturn nil\n}\n\nfunc (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error {\n\tif err := f.fr.WriteWindowUpdate(streamID, incr); err != nil {\n\t\treturn err\n\t}\n\tif forceFlush {\n\t\treturn f.writer.Flush()\n\t}\n\treturn nil\n}\n\nfunc (f *framer) flushWrite() error {\n\treturn f.writer.Flush()\n}\n\nfunc (f *framer) readFrame() (http2.Frame, error) {\n\treturn f.fr.ReadFrame()\n}\n"
  },
  {
    "path": "vendor/google.golang.org/grpc/transport/transport.go",
    "content": "/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *     * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *     * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and/or other materials provided with the\n * distribution.\n *     * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n/*\nPackage transport defines and implements message oriented communication channel\nto complete various transactions (e.g., an RPC).\n*/\npackage transport\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/grpc/metadata\"\n)\n\n// recvMsg represents the received msg from the transport. All transport\n// protocol specific info has been removed.\ntype recvMsg struct {\n\tdata []byte\n\t// nil: received some data\n\t// io.EOF: stream is completed. data is nil.\n\t// other non-nil error: transport failure. 
data is nil.\n\terr error\n}\n\nfunc (recvMsg) isItem() bool {\n\treturn true\n}\n\n// All items in an out of a recvBuffer should be the same type.\ntype item interface {\n\tisItem() bool\n}\n\n// recvBuffer is an unbounded channel of item.\ntype recvBuffer struct {\n\tc       chan item\n\tmu      sync.Mutex\n\tbacklog []item\n}\n\nfunc newRecvBuffer() *recvBuffer {\n\tb := &recvBuffer{\n\t\tc: make(chan item, 1),\n\t}\n\treturn b\n}\n\nfunc (b *recvBuffer) put(r item) {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\tb.backlog = append(b.backlog, r)\n\tselect {\n\tcase b.c <- b.backlog[0]:\n\t\tb.backlog = b.backlog[1:]\n\tdefault:\n\t}\n}\n\nfunc (b *recvBuffer) load() {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\tif len(b.backlog) > 0 {\n\t\tselect {\n\t\tcase b.c <- b.backlog[0]:\n\t\t\tb.backlog = b.backlog[1:]\n\t\tdefault:\n\t\t}\n\t}\n}\n\n// get returns the channel that receives an item in the buffer.\n//\n// Upon receipt of an item, the caller should call load to send another\n// item onto the channel if there is any.\nfunc (b *recvBuffer) get() <-chan item {\n\treturn b.c\n}\n\n// recvBufferReader implements io.Reader interface to read the data from\n// recvBuffer.\ntype recvBufferReader struct {\n\tctx  context.Context\n\trecv *recvBuffer\n\tlast *bytes.Reader // Stores the remaining data in the previous calls.\n\terr  error\n}\n\n// Read reads the next len(p) bytes from last. If last is drained, it tries to\n// read additional data from recv. It blocks if there no additional data available\n// in recv. 
If Read returns any non-nil error, it will continue to return that error.\nfunc (r *recvBufferReader) Read(p []byte) (n int, err error) {\n\tif r.err != nil {\n\t\treturn 0, r.err\n\t}\n\tdefer func() { r.err = err }()\n\tif r.last != nil && r.last.Len() > 0 {\n\t\t// Read remaining data left in last call.\n\t\treturn r.last.Read(p)\n\t}\n\tselect {\n\tcase <-r.ctx.Done():\n\t\treturn 0, ContextErr(r.ctx.Err())\n\tcase i := <-r.recv.get():\n\t\tr.recv.load()\n\t\tm := i.(*recvMsg)\n\t\tif m.err != nil {\n\t\t\treturn 0, m.err\n\t\t}\n\t\tr.last = bytes.NewReader(m.data)\n\t\treturn r.last.Read(p)\n\t}\n}\n\ntype streamState uint8\n\nconst (\n\tstreamActive    streamState = iota\n\tstreamWriteDone             // EndStream sent\n\tstreamReadDone              // EndStream received\n\tstreamDone                  // sendDone and recvDone or RSTStreamFrame is sent or received.\n)\n\n// Stream represents an RPC in the transport layer.\ntype Stream struct {\n\tid uint32\n\t// nil for client side Stream.\n\tst ServerTransport\n\t// ctx is the associated context of the stream.\n\tctx    context.Context\n\tcancel context.CancelFunc\n\t// method records the associated RPC method of the stream.\n\tmethod string\n\tbuf    *recvBuffer\n\tdec    io.Reader\n\n\t// updateStreams indicates whether the transport's streamsQuota needed\n\t// to be updated when this stream is closed. 
It is false when the transport\n\t// sticks to the initial infinite value of the number of concurrent streams.\n\t// Ture otherwise.\n\tupdateStreams bool\n\tfc            *inFlow\n\trecvQuota     uint32\n\t// The accumulated inbound quota pending for window update.\n\tupdateQuota uint32\n\t// The handler to control the window update procedure for both this\n\t// particular stream and the associated transport.\n\twindowHandler func(int)\n\n\tsendQuotaPool *quotaPool\n\t// Close headerChan to indicate the end of reception of header metadata.\n\theaderChan chan struct{}\n\t// header caches the received header metadata.\n\theader metadata.MD\n\t// The key-value map of trailer metadata.\n\ttrailer metadata.MD\n\n\tmu sync.RWMutex // guard the following\n\t// headerOK becomes true from the first header is about to send.\n\theaderOk bool\n\tstate    streamState\n\t// true iff headerChan is closed. Used to avoid closing headerChan\n\t// multiple times.\n\theaderDone bool\n\t// the status received from the server.\n\tstatusCode codes.Code\n\tstatusDesc string\n}\n\n// Header acquires the key-value pairs of header metadata once it\n// is available. It blocks until i) the metadata is ready or ii) there is no\n// header metadata or iii) the stream is cancelled/expired.\nfunc (s *Stream) Header() (metadata.MD, error) {\n\tselect {\n\tcase <-s.ctx.Done():\n\t\treturn nil, ContextErr(s.ctx.Err())\n\tcase <-s.headerChan:\n\t\treturn s.header.Copy(), nil\n\t}\n}\n\n// Trailer returns the cached trailer metedata. Note that if it is not called\n// after the entire stream is done, it could return an empty MD. 
Client\n// side only.\nfunc (s *Stream) Trailer() metadata.MD {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.trailer.Copy()\n}\n\n// ServerTransport returns the underlying ServerTransport for the stream.\n// The client side stream always returns nil.\nfunc (s *Stream) ServerTransport() ServerTransport {\n\treturn s.st\n}\n\n// Context returns the context of the stream.\nfunc (s *Stream) Context() context.Context {\n\treturn s.ctx\n}\n\n// Method returns the method for the stream.\nfunc (s *Stream) Method() string {\n\treturn s.method\n}\n\n// StatusCode returns statusCode received from the server.\nfunc (s *Stream) StatusCode() codes.Code {\n\treturn s.statusCode\n}\n\n// StatusDesc returns statusDesc received from the server.\nfunc (s *Stream) StatusDesc() string {\n\treturn s.statusDesc\n}\n\n// ErrIllegalTrailerSet indicates that the trailer has already been set or it\n// is too late to do so.\nvar ErrIllegalTrailerSet = errors.New(\"transport: trailer has been set\")\n\n// SetTrailer sets the trailer metadata which will be sent with the RPC status\n// by the server. This can only be called at most once. 
Server side only.\nfunc (s *Stream) SetTrailer(md metadata.MD) error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif s.trailer != nil {\n\t\treturn ErrIllegalTrailerSet\n\t}\n\ts.trailer = md.Copy()\n\treturn nil\n}\n\nfunc (s *Stream) write(m recvMsg) {\n\ts.buf.put(&m)\n}\n\n// Read reads all the data available for this Stream from the transport and\n// passes them into the decoder, which converts them into a gRPC message stream.\n// The error is io.EOF when the stream is done or another non-nil error if\n// the stream broke.\nfunc (s *Stream) Read(p []byte) (n int, err error) {\n\tn, err = s.dec.Read(p)\n\tif err != nil {\n\t\treturn\n\t}\n\ts.windowHandler(n)\n\treturn\n}\n\ntype key int\n\n// The key to save transport.Stream in the context.\nconst streamKey = key(0)\n\n// newContextWithStream creates a new context from ctx and attaches stream\n// to it.\nfunc newContextWithStream(ctx context.Context, stream *Stream) context.Context {\n\treturn context.WithValue(ctx, streamKey, stream)\n}\n\n// StreamFromContext returns the stream saved in ctx.\nfunc StreamFromContext(ctx context.Context) (s *Stream, ok bool) {\n\ts, ok = ctx.Value(streamKey).(*Stream)\n\treturn\n}\n\n// state of transport\ntype transportState int\n\nconst (\n\treachable transportState = iota\n\tunreachable\n\tclosing\n)\n\n// NewServerTransport creates a ServerTransport with conn or non-nil error\n// if it fails.\nfunc NewServerTransport(protocol string, conn net.Conn, maxStreams uint32) (ServerTransport, error) {\n\treturn newHTTP2Server(conn, maxStreams)\n}\n\n// ConnectOptions covers all relevant options for dialing a server.\ntype ConnectOptions struct {\n\tDialer      func(string, time.Duration) (net.Conn, error)\n\tAuthOptions []credentials.Credentials\n\tTimeout     time.Duration\n}\n\n// NewClientTransport establishes the transport with the required ConnectOptions\n// and returns it to the caller.\nfunc NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, error) 
{\n\treturn newHTTP2Client(target, opts)\n}\n\n// Options provides additional hints and information for message\n// transmission.\ntype Options struct {\n\t// Indicate whether it is the last piece for this stream.\n\tLast bool\n\t// The hint to transport impl whether the data could be buffered for\n\t// batching write. Transport impl can feel free to ignore it.\n\tDelay bool\n}\n\n// CallHdr carries the information of a particular RPC.\ntype CallHdr struct {\n\tHost   string // peer host\n\tMethod string // the operation to perform on the specified host\n}\n\n// ClientTransport is the common interface for all gRPC client side transport\n// implementations.\ntype ClientTransport interface {\n\t// Close tears down this transport. Once it returns, the transport\n\t// should not be accessed any more. The caller must make sure this\n\t// is called only once.\n\tClose() error\n\n\t// Write sends the data for the given stream. A nil stream indicates\n\t// the write is to be performed on the transport as a whole.\n\tWrite(s *Stream, data []byte, opts *Options) error\n\n\t// NewStream creates a Stream for an RPC.\n\tNewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)\n\n\t// CloseStream clears the footprint of a stream when the stream is\n\t// not needed any more. The err indicates the error incurred when\n\t// CloseStream is called. Must be called when a stream is finished\n\t// unless the associated transport is closing.\n\tCloseStream(stream *Stream, err error)\n\n\t// Error returns a channel that is closed when some I/O error\n\t// happens. Typically the caller should have a goroutine to monitor\n\t// this in order to take action (e.g., close the current transport\n\t// and create a new one) in error case. 
It should not return nil\n\t// once the transport is initiated.\n\tError() <-chan struct{}\n}\n\n// ServerTransport is the common interface for all gRPC server side transport\n// implementations.\ntype ServerTransport interface {\n\t// WriteStatus sends the status of a stream to the client.\n\tWriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error\n\t// Write sends the data for the given stream.\n\tWrite(s *Stream, data []byte, opts *Options) error\n\t// WriteHeader sends the header metedata for the given stream.\n\tWriteHeader(s *Stream, md metadata.MD) error\n\t// HandleStreams receives incoming streams using the given handler.\n\tHandleStreams(func(*Stream))\n\t// Close tears down the transport. Once it is called, the transport\n\t// should not be accessed any more. All the pending streams and their\n\t// handlers will be terminated asynchronously.\n\tClose() error\n}\n\n// StreamErrorf creates an StreamError with the specified error code and description.\nfunc StreamErrorf(c codes.Code, format string, a ...interface{}) StreamError {\n\treturn StreamError{\n\t\tCode: c,\n\t\tDesc: fmt.Sprintf(format, a...),\n\t}\n}\n\n// ConnectionErrorf creates an ConnectionError with the specified error description.\nfunc ConnectionErrorf(format string, a ...interface{}) ConnectionError {\n\treturn ConnectionError{\n\t\tDesc: fmt.Sprintf(format, a...),\n\t}\n}\n\n// ConnectionError is an error that results in the termination of the\n// entire connection and the retry of all the active streams.\ntype ConnectionError struct {\n\tDesc string\n}\n\nfunc (e ConnectionError) Error() string {\n\treturn fmt.Sprintf(\"connection error: desc = %q\", e.Desc)\n}\n\n// Define some common ConnectionErrors.\nvar ErrConnClosing = ConnectionError{Desc: \"transport is closing\"}\n\n// StreamError is an error that only affects one stream within a connection.\ntype StreamError struct {\n\tCode codes.Code\n\tDesc string\n}\n\nfunc (e StreamError) Error() string {\n\treturn 
fmt.Sprintf(\"stream error: code = %d desc = %q\", e.Code, e.Desc)\n}\n\n// ContextErr converts the error from context package into a StreamError.\nfunc ContextErr(err error) StreamError {\n\tswitch err {\n\tcase context.DeadlineExceeded:\n\t\treturn StreamErrorf(codes.DeadlineExceeded, \"%v\", err)\n\tcase context.Canceled:\n\t\treturn StreamErrorf(codes.Canceled, \"%v\", err)\n\t}\n\tpanic(fmt.Sprintf(\"Unexpected error from context packet: %v\", err))\n}\n\n// wait blocks until it can receive from ctx.Done, closing, or proceed.\n// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.\n// If it receives from closing, it returns 0, ErrConnClosing.\n// If it receives from proceed, it returns the received integer, nil.\nfunc wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn 0, ContextErr(ctx.Err())\n\tcase <-closing:\n\t\treturn 0, ErrConnClosing\n\tcase i := <-proceed:\n\t\treturn i, nil\n\t}\n}\n"
  }
]