[
  {
    "path": ".gitignore",
    "content": ".DS_Store\ndist/\n\n"
  },
  {
    "path": ".travis.yml",
    "content": "language: go\n\ngo:\n  - 1.5\n  - tip\n\nenv:\n  - GO15VENDOREXPERIMENT=1\n\ninstall: true\n\nscript:\n  - \"go install ./apps/phosphor\"\n  - \"go install ./apps/phosphord\"\n  - \"go test $(go list ./... | grep -v /vendor/)\"\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM busybox\n\nADD dist/docker/bin/ /phosphor_bin/\nRUN cd /    && ln -s /phosphor_bin/* . \\\n && cd /bin && ln -s /phosphor_bin/* .\n\nEXPOSE 7750 7760 7760/udp\n"
  },
  {
    "path": "Godeps/Godeps.json",
    "content": "{\n\t\"ImportPath\": \"github.com/mondough/phosphor\",\n\t\"GoVersion\": \"go1.5\",\n\t\"Packages\": [\n\t\t\"./...\"\n\t],\n\t\"Deps\": [\n\t\t{\n\t\t\t\"ImportPath\": \"code.google.com/p/snappy-go/snappy\",\n\t\t\t\"Comment\": \"null-15\",\n\t\t\t\"Rev\": \"12e4b4183793ac4b061921e7980845e750679fd0\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"github.com/bitly/go-nsq\",\n\t\t\t\"Comment\": \"v1.0.4-13-ga3aee1d\",\n\t\t\t\"Rev\": \"a3aee1d8e104a99d8fedffe2c45832df6a96d735\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"github.com/cihub/seelog\",\n\t\t\t\"Comment\": \"go1.1-55-gc40fd0a\",\n\t\t\t\"Rev\": \"c40fd0af694fa48ec870c030f495c26a5bffcf55\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"github.com/golang/protobuf/proto\",\n\t\t\t\"Rev\": \"16256d3ce6929458613798ee44b7914a3f59f5c6\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"github.com/mreiferson/go-options\",\n\t\t\t\"Rev\": \"7c174072188d0cfbe6f01bb457626abb22bdff52\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"github.com/mreiferson/go-snappystream\",\n\t\t\t\"Comment\": \"v0.2.2-8-ga5260a3\",\n\t\t\t\"Rev\": \"a5260a307b3e7dd583283c1e2717445244d506c7\"\n\t\t},\n\t\t{\n\t\t\t\"ImportPath\": \"golang.org/x/net/context\",\n\t\t\t\"Rev\": \"47990a1ba55743e6ef1affd3a14e5bac8553615d\"\n\t\t}\n\t]\n}\n"
  },
  {
    "path": "Godeps/Readme",
    "content": "This directory tree is generated automatically by godep.\n\nPlease do not edit.\n\nSee https://github.com/tools/godep for more information.\n"
  },
  {
    "path": "LICENCE",
    "content": "Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived 
from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"{}\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright 2015 Matt Heath\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "![](docs/logo.png)\n\nPhosphor is a Distributed Tracing system, similar to [Google's Dapper](https://research.google.com/pubs/pub36356.html),  [Twitter's Zipkin](https://twitter.github.io/zipkin), and [Hailo's Trace Service](https://speakerdeck.com/mattheath/scaling-microservices-in-go-high-load-strategy-2015?slide=45).\n\n![](https://travis-ci.org/mondough/phosphor.svg?branch=master)\n\nIt is comprised of a few simple components:\n\n - [Phosphor Client](https://github.com/mondough/phosphor-go), used to send traces from applications\n - [Phosphor Daemon](https://github.com/mondough/phosphor/tree/master/phosphord), collects traces and forwards onto the main server\n - [Phosphor Server](https://github.com/mondough/phosphor/tree/master/phosphor), stores traces and aggregated trace information\n - Phosphor UI, view trace and debug information about your infrastructure\n\n![Phosphor Architecture](docs/phosphor/outline.png)\n\n## Dependencies\n\n - [NSQ](https://nsq.io) is used as the delivery transport between PhosphorD and the Phosphor Server\n\n## Caveats\n\nThis system is currently in development, and some components are not yet open source. In particular, the persistence layer in this repository is an in-memory mock, and is therefore not appropriate for production usage. Additional storage adaptors will be added in the near future.\n"
  },
  {
    "path": "apps/phosphor/main.go",
    "content": "package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"os\"\n\t\"os/signal\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/mondough/phosphor/internal/util\"\n\t\"github.com/mondough/phosphor/internal/version\"\n\t\"github.com/mondough/phosphor/phosphor\"\n\t\"github.com/mreiferson/go-options\"\n)\n\nfunc phosphorFlagSet() *flag.FlagSet {\n\tflagSet := flag.NewFlagSet(\"phosphor\", flag.ExitOnError)\n\n\t// basic options\n\tflagSet.Bool(\"version\", false, \"print version string\")\n\tflagSet.Bool(\"verbose\", false, \"enable verbose logging\")\n\tflagSet.Int64(\"worker-id\", 0, \"unique seed for message ID generation (int) in range [0,4096) (will default to a hash of hostname)\")\n\tflagSet.String(\"https-address\", \"\", \"<addr>:<port> to listen on for HTTPS clients\")\n\tflagSet.String(\"http-address\", \"0.0.0.0:7750\", \"<addr>:<port> to listen on for HTTP clients\")\n\n\t// NSQ Transport options\n\tnsqLookupdHTTPAddrs := util.StringArray{}\n\tflagSet.Var(&nsqLookupdHTTPAddrs, \"nsqlookupd-http-address\", \"nsqlookupd HTTP address (may be given multiple times)\")\n\tnsqdHTTPAddrs := util.StringArray{}\n\tflagSet.Var(&nsqdHTTPAddrs, \"nsqd-http-address\", \"nsqd HTTP address (may be given multiple times)\")\n\tflagSet.String(\"nsq-topic\", \"phosphor\", \"NSQ topic name to receive traces from\")\n\tflagSet.String(\"nsq-channel\", \"phosphor-server\", \"NSQ channel name to receive traces from. 
This should be the same for all instances of the phosphor servers to spread ingestion work.\")\n\tflagSet.Int(\"nsq-max-inflight\", 200, \"Number of traces to allow NSQ to keep inflight\")\n\tflagSet.Int(\"nsq-num-handlers\", 10, \"Number of concurrent NSQ handlers to run\")\n\n\treturn flagSet\n}\n\nfunc main() {\n\tflagSet := phosphorFlagSet()\n\tflagSet.Parse(os.Args[1:])\n\n\t// Globally seed rand\n\trand.Seed(time.Now().UTC().UnixNano())\n\n\tif flagSet.Lookup(\"version\").Value.(flag.Getter).Get().(bool) {\n\t\tfmt.Println(version.String(\"phosphor\"))\n\t\treturn\n\t}\n\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)\n\n\topts := phosphor.NewOptions()\n\tcfg := map[string]interface{}{}\n\toptions.Resolve(opts, flagSet, cfg)\n\n\tp := phosphor.New(opts)\n\n\tp.Run()\n\t<-signalChan\n\tp.Exit()\n}\n"
  },
  {
    "path": "apps/phosphord/main.go",
    "content": "package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"os\"\n\t\"os/signal\"\n\t\"runtime\"\n\t\"syscall\"\n\t\"time\"\n\n\tlog \"github.com/cihub/seelog\"\n\t\"github.com/mreiferson/go-options\"\n\n\t\"github.com/mondough/phosphor/internal/util\"\n\t\"github.com/mondough/phosphor/internal/version\"\n\t\"github.com/mondough/phosphor/phosphord\"\n)\n\nfunc phosphordFlagSet() *flag.FlagSet {\n\tflagSet := flag.NewFlagSet(\"phosphord\", flag.ExitOnError)\n\n\t// basic options\n\tflagSet.Bool(\"version\", false, \"print version string\")\n\tflagSet.Bool(\"verbose\", false, \"enable verbose logging\")\n\n\t// forwarder options\n\tflagSet.String(\"udp-address\", \"0.0.0.0:7760\", \"<addr>:<port> to listen for UDP traces\")\n\tflagSet.Int(\"num-forwarders\", 20, \"set the number of workers which buffer and forward traces\")\n\tflagSet.Int(\"buffer-size\", 200, \"set the maximum number of traces buffered per worker before batch sending\")\n\tflagSet.Int(\"flush-interval\", 2000, \"set the maximum flush interval in ms\")\n\n\t// NSQ Transport options\n\tflagSet.String(\"nsq-topic\", \"phosphor\", \"NSQ topic name to receive traces from\")\n\tnsqdTCPAddrs := util.StringArray{}\n\tflagSet.Var(&nsqdTCPAddrs, \"nsqd-tcp-address\", \"nsqd TCP address (may be given multiple times)\")\n\n\treturn flagSet\n}\n\nfunc main() {\n\tflagSet := phosphordFlagSet()\n\tflagSet.Parse(os.Args[1:])\n\n\tdefer log.Flush()\n\n\t// Globally seed rand\n\trand.Seed(time.Now().UTC().UnixNano())\n\n\t// Use ALL the CPUs\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\t// Immediately print and exit the version number\n\tif flagSet.Lookup(\"version\").Value.(flag.Getter).Get().(bool) {\n\t\tfmt.Println(version.String(\"phosphord\"))\n\t\treturn\n\t}\n\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)\n\n\topts := phosphord.NewOptions()\n\tcfg := map[string]interface{}{}\n\toptions.Resolve(opts, flagSet, cfg)\n\n\tp := 
phosphord.New(opts)\n\n\tp.Run()\n\t<-signalChan\n\tp.Exit()\n}\n"
  },
  {
    "path": "dist.sh",
    "content": "#!/bin/bash\n\nset -e\n\nDIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\nrm -rf   $DIR/dist/docker\nmkdir -p $DIR/dist/docker\nrm -rf   $DIR/.godeps\nmkdir -p $DIR/.godeps\nexport GOPATH=$DIR/vendor:$GOPATH\n\narch=$(go env GOARCH)\nversion=$(awk '/const Version/ {print $NF}' < $DIR/internal/version/version.go | sed 's/\"//g')\ngoversion=$(go version | awk '{print $3}')\n\nfor os in linux darwin freebsd; do\n    echo \"... building v$version for $os/$arch\"\n    BUILD=$(mktemp -d -t phosphor)\n    TARGET=\"phosphor-$version.$os-$arch.$goversion\"\n    for app in phosphor phosphord; do\n        GOOS=$os GOARCH=$arch CGO_ENABLED=0 go build -o $BUILD/$TARGET/bin/$app ./apps/$app\n    done\n    pushd $BUILD\n    if [ \"$os\" == \"linux\" ]; then\n        cp -r $TARGET/bin $DIR/dist/docker/\n    fi\n    tar czvf $TARGET.tar.gz $TARGET\n    mv $TARGET.tar.gz $DIR/dist\n    popd\n    rm -r $BUILD\ndone\n\ndocker build -t mondough/phosphor:v$version .\nif [[ ! $version == *\"-\"* ]]; then\n    echo \"Tagging mondough/phosphor:v$version as the latest release.\"\n    docker tag -f mondough/phosphor:v$version mondough/phosphor:latest\nfi\n"
  },
  {
    "path": "internal/util/stringarray.go",
    "content": "// The StringArray type is borrowed from NSQ\n// https://github.com/bitly/nsq/blob/master/util/string_array.go\n\npackage util\n\nimport (\n\t\"strings\"\n)\n\ntype StringArray []string\n\nfunc (a *StringArray) Set(s string) error {\n\t*a = append(*a, s)\n\treturn nil\n}\n\nfunc (a *StringArray) String() string {\n\treturn strings.Join(*a, \",\")\n}\n"
  },
  {
    "path": "internal/version/version.go",
    "content": "package version\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n)\n\n// Version of the binaries\nconst Version = \"0.0.1\"\n\n// String returns our formatted version string\nfunc String(app string) string {\n\treturn fmt.Sprintf(\"%s v%s (built w/%s)\", app, Version, runtime.Version())\n}\n"
  },
  {
    "path": "phosphor/README.md",
    "content": "# Phosphor\n\nThe Phosphor server receives traces from PhosphorD via NSQ and stores these for later retrieval via its API.\n\n## Usage\n\n```\n-http-address string\n    <addr>:<port> to listen on for HTTP clients (default \"0.0.0.0:7750\")\n-https-address string\n    <addr>:<port> to listen on for HTTPS clients\n-nsq-channel string\n    NSQ channel name to receive traces from. This should be the same for all instances of the phosphor servers to spread ingestion work. (default \"phosphor-server\")\n-nsq-max-inflight int\n    Number of traces to allow NSQ to keep inflight (default 200)\n-nsq-num-handlers int\n    Number of concurrent NSQ handlers to run (default 10)\n-nsq-topic string\n    NSQ topic name to receive traces from (default \"phosphor\")\n-nsqd-http-address value\n    nsqd HTTP address (may be given multiple times)\n-nsqlookupd-http-address value\n    nsqlookupd HTTP address (may be given multiple times)\n-verbose\n    enable verbose logging\n-version\n    print version string\n-worker-id int\n    unique seed for message ID generation (int) in range [0,4096) (will default to a hash of hostname)\n```\n"
  },
  {
    "path": "phosphor/context.go",
    "content": "package phosphor\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org/x/net/context\"\n)\n\nfunc phosphorFromContext(ctx context.Context) (*Phosphor, error) {\n\n\tif p, ok := ctx.Value(\"phosphor\").(*Phosphor); ok {\n\t\treturn p, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"Couldn't retrieve Phosphor from Context\")\n\n}\n"
  },
  {
    "path": "phosphor/domain.go",
    "content": "package phosphor\n\nimport (\n\t\"errors\"\n\t\"sync\"\n\t\"time\"\n)\n\n// NewTrace initialises and returns a new Trace\nfunc NewTrace() *Trace {\n\treturn &Trace{\n\t\tAnnotation: make([]*Annotation, 0),\n\t}\n}\n\n// Trace represents a full trace of a request\n// comprised of a number of Annotations\ntype Trace struct {\n\tsync.Mutex\n\n\tAnnotation []*Annotation `json:\"annotations\"`\n}\n\n// AppendAnnotation to a Trace\nfunc (t *Trace) AppendAnnotation(a *Annotation) error {\n\tif t == nil {\n\t\treturn errors.New(\"Trace is Nil\")\n\t}\n\n\tt.Annotation = append(t.Annotation, a)\n\n\treturn nil\n}\n\n// AnnotationType represents an Enum of types of Annotations which Phosphor supports\ntype AnnotationType int32\n\nconst (\n\tUnknownAnnotationType = AnnotationType(0) // No idea...\n\n\t// Calls\n\tReq     = AnnotationType(1) // Client Request dispatch\n\tRsp     = AnnotationType(2) // Client Response received\n\tIn      = AnnotationType(3) // Server Request received\n\tOut     = AnnotationType(4) // Server Response dispatched\n\tTimeout = AnnotationType(5) // Client timed out waiting\n\n\t// Developer initiated annotations\n\t// @todo\n\t// Annotation = AnnotationType(6)\n)\n\n// An Annotation represents the smallest individually recorded component of a trace\n// These can be assembled into spans, and entire traces of a request to our systems\ntype Annotation struct {\n\tTraceId      string // Global Trace Identifier\n\tSpanId       string // Identifier for this span, non unique - eg. RPC calls would have 4 annotation with this id\n\tParentSpanId string // Parent span - eg. nested RPC calls\n\n\tTimestamp time.Time     // Timestamp the event occurred, can only be compared on the same machine\n\tDuration  time.Duration // Optional: duration of the event, eg. 
RPC call\n\n\tHostname    string // Hostname this event originated from\n\tOrigin      string // Fully qualified name of the message origin\n\tDestination string // Optional: Fully qualified name of the message destination\n\n\tAnnotationType AnnotationType // The type of Annotation\n\tAsync          bool           // If the request was fired asynchronously\n\n\tPayload     string            // The payload, eg. RPC body, or Annotation\n\tPayloadSize int32             // Bytes of payload\n\tKeyValue    map[string]string // Key value debug information\n}\n"
  },
  {
    "path": "phosphor/handler.go",
    "content": "package phosphor\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"golang.org/x/net/context\"\n\n\tlog \"github.com/cihub/seelog\"\n\t\"github.com/mondough/phosphor/internal/version\"\n)\n\n// Index\n// @todo return version information etc\nfunc Index(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tfmt.Fprintf(w, fmt.Sprintf(`{\n\t\t\"name\": \"phosphor\",\n\t\t\"version\": \"%s\"\n\t}`, version.Version))\n}\n\n// TraceLookup retrieves a trace from the persistence layer\nfunc TraceLookup(ctx context.Context) func(http.ResponseWriter, *http.Request) {\n\tp, err := phosphorFromContext(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\ttraceId := r.URL.Query().Get(\"traceId\")\n\t\tif traceId == \"\" {\n\t\t\terrorResponse(r, w, http.StatusBadRequest, errors.New(\"traceId param not provided\"))\n\t\t\treturn\n\t\t}\n\n\t\tlog.Debugf(\"Trace lookup - TraceId: %s\", traceId)\n\t\tt, err := p.Store.ReadTrace(traceId)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Trace lookup failed: %s\", err)\n\t\t\terrorResponse(r, w, http.StatusInternalServerError, fmt.Errorf(\"could not load trace: %s\", err))\n\t\t\treturn\n\t\t}\n\n\t\t// If we don't find the trace return 404\n\t\tif t == nil {\n\t\t\tlog.Debugf(\"Trace not found: %s\", traceId)\n\t\t\terrorResponse(r, w, http.StatusNotFound, errors.New(\"traceId not found\"))\n\t\t\treturn\n\t\t}\n\n\t\t// Return trace\n\t\tresponse(\n\t\t\tr,\n\t\t\tw,\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"trace\": prettyFormatTrace(t),\n\t\t\t},\n\t\t)\n\t}\n}\n\n// response sends the response back to the client, marshaling to JSON\nfunc response(r *http.Request, w http.ResponseWriter, resp interface{}) {\n\twriteResponse(r, w, http.StatusOK, resp)\n}\n\n// errorResponse marshals an error to JSON and returns this to the client\nfunc errorResponse(r *http.Request, w http.ResponseWriter, code int, err error) 
{\n\tresp := map[string]interface{}{\n\t\t\"error\": err.Error(),\n\t}\n\n\twriteResponse(r, w, code, resp)\n}\n\n// response marshals a response to json and returns to the client\nfunc writeResponse(r *http.Request, w http.ResponseWriter, code int, resp interface{}) {\n\n\t// Deal with CORS\n\tif origin := r.Header.Get(\"Origin\"); origin != \"\" {\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"DELETE, GET, HEAD, OPTIONS, POST, PUT\")\n\t\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\t\t// Allow any headers\n\t\tif wantedHeaders := r.Header.Get(\"Access-Control-Request-Headers\"); wantedHeaders != \"\" {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Headers\", wantedHeaders)\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\n\tb, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintln(w, `{\"error\":\"failed to marshal json\"}`)\n\t\treturn\n\t}\n\n\tw.WriteHeader(code)\n\tfmt.Fprintln(w, string(b))\n}\n"
  },
  {
    "path": "phosphor/ingester.go",
    "content": "package phosphor\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\tnsq \"github.com/bitly/go-nsq\"\n\tlog \"github.com/cihub/seelog\"\n\t\"github.com/golang/protobuf/proto\"\n\n\ttraceproto \"github.com/mondough/phosphor/proto\"\n)\n\nvar (\n\ttopic   = \"trace\"\n\tchannel = \"phosphor-server\"\n\n\tmaxInFlight = 200\n\tconcurrency = 10\n)\n\n// Run the trace ingester, ingesting traces into the provided store\nfunc (p *Phosphor) RunIngester() {\n\tcfg := nsq.NewConfig()\n\tcfg.UserAgent = fmt.Sprintf(\"phosphor go-nsq/%s\", nsq.VERSION)\n\tcfg.MaxInFlight = p.opts.NSQMaxInflight\n\n\tconsumer, err := nsq.NewConsumer(p.opts.NSQTopicName, p.opts.NSQChannelName, cfg)\n\tif err != nil {\n\t\tlog.Critical(err)\n\t\tos.Exit(1)\n\t}\n\n\tconsumer.AddConcurrentHandlers(&IngestionHandler{\n\t\tstore: p.Store,\n\t}, p.opts.NSQNumHandlers)\n\n\tif len(p.opts.NSQDHTTPAddresses) != 0 {\n\t\terr = consumer.ConnectToNSQDs(p.opts.NSQDHTTPAddresses)\n\t\tif err != nil {\n\t\t\tlog.Critical(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t} else {\n\t\terr = consumer.ConnectToNSQLookupds(p.opts.NSQLookupdHTTPAddresses)\n\t\tif err != nil {\n\t\t\tlog.Critical(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t// Block until exit\n\tselect {\n\tcase <-consumer.StopChan:\n\tcase <-p.exitChan:\n\t}\n}\n\n// IngestionHandler exists to match the NSQ handler interface\ntype IngestionHandler struct {\n\tstore Store\n}\n\n// HandleMessage delivered by NSQ\nfunc (ih *IngestionHandler) HandleMessage(message *nsq.Message) error {\n\n\tp := &traceproto.Annotation{}\n\terr := proto.Unmarshal(message.Body, p)\n\tif err != nil {\n\t\t// returning an error to NSQ will requeue this\n\t\t// failure to unmarshal is permanent\n\t\treturn nil\n\t}\n\n\ta := ProtoToAnnotation(p)\n\tlog.Debugf(\"Received annotation: %+v\", a)\n\n\t// Write to our store\n\tih.store.StoreAnnotation(a)\n\n\treturn nil\n}\n"
  },
  {
    "path": "phosphor/marshaling.go",
    "content": "package phosphor\n\nimport (\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com/mondough/phosphor/proto\"\n)\n\nfunc prettyFormatTrace(t *Trace) interface{} {\n\treturn map[string]interface{}{\n\t\t\"annotations\": formatAnnotations(t.Annotation),\n\t}\n}\n\nfunc formatAnnotations(ans []*Annotation) interface{} {\n\n\tsort.Sort(ByTime(ans))\n\n\t// Convert to proto\n\tpa := AnnotationsToProto(ans)\n\n\t// Format nicely as JSON\n\tm := make([]interface{}, 0, len(pa))\n\tfor _, a := range pa {\n\t\tm = append(m, formatAnnotation(a))\n\t}\n\treturn m\n}\n\nfunc formatAnnotation(a *traceproto.Annotation) interface{} {\n\treturn map[string]interface{}{\n\t\t\"trace_id\":    a.TraceId,\n\t\t\"span_id\":     a.SpanId,\n\t\t\"parent_id\":   a.ParentId,\n\t\t\"type\":        a.Type.String(),\n\t\t\"async\":       a.Async,\n\t\t\"timestamp\":   a.Timestamp,\n\t\t\"duration\":    a.Duration,\n\t\t\"hostname\":    a.Hostname,\n\t\t\"origin\":      a.Origin,\n\t\t\"destination\": a.Destination,\n\t\t\"payload\":     a.Payload,\n\t\t\"key_value\":   a.KeyValue,\n\t}\n}\n\ntype ByTime []*Annotation\n\nfunc (s ByTime) Len() int {\n\treturn len(s)\n}\nfunc (s ByTime) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\nfunc (s ByTime) Less(i, j int) bool {\n\treturn s[i].Timestamp.Before(s[j].Timestamp)\n}\n\n// ---\n\n// ProtoToAnnotation converts a proto annotation to our domain\nfunc ProtoToAnnotation(p *traceproto.Annotation) *Annotation {\n\tif p == nil {\n\t\treturn &Annotation{}\n\t}\n\n\treturn &Annotation{\n\t\tTraceId:        p.TraceId,\n\t\tSpanId:         p.SpanId,\n\t\tParentSpanId:   p.ParentId,\n\t\tTimestamp:      microsecondInt64ToTime(p.Timestamp),\n\t\tDuration:       microsecondInt64ToDuration(p.Duration),\n\t\tHostname:       p.Hostname,\n\t\tOrigin:         p.Origin,\n\t\tDestination:    p.Destination,\n\t\tAnnotationType: protoToAnnotationType(p.Type),\n\t\tAsync:          p.Async,\n\t\tPayload:        p.Payload,\n\t\tPayloadSize:    
int32(len(p.Payload)),\n\t\tKeyValue:       protoToKeyValue(p.KeyValue),\n\t}\n}\n\n// protoToAnnotationType converts an annotation type in our proto to our domain\nfunc protoToAnnotationType(p traceproto.AnnotationType) AnnotationType {\n\t// Ensure we are within bounds\n\tat := int32(p)\n\tif at > 6 || at < 1 {\n\t\tat = 0\n\t}\n\n\treturn AnnotationType(at)\n}\n\n// annotationTypeToProto converts an annotation type in our domain to proto format\nfunc annotationTypeToProto(at AnnotationType) traceproto.AnnotationType {\n\t// Ensure we are within bounds\n\tp := int32(at)\n\tif p > 6 || p < 1 {\n\t\tp = 0\n\t}\n\n\treturn traceproto.AnnotationType(p)\n}\n\n// microsecondInt64ToTime converts an integer number of microseconds\n// since the epoch to a time\nfunc microsecondInt64ToTime(i int64) time.Time {\n\tµsec := i % 1e6\n\tsec := (i - µsec) / 1e6\n\n\treturn time.Unix(sec, µsec*1e3)\n}\n\n// timeToMicrosecondInt64 converts a time to µseconds since epoch as int64\nfunc timeToMicrosecondInt64(t time.Time) int64 {\n\tsec := t.Unix() * 1e6\n\tµsec := int64(t.Nanosecond() / 1e3)\n\n\treturn sec + µsec\n}\n\n// microsecondInt64ToDuration converts an integer number\n// of microseconds to a duration\nfunc microsecondInt64ToDuration(i int64) time.Duration {\n\treturn time.Duration(i) * time.Microsecond\n}\n\n// durationToMicrosecondInt64 returns a duration to the nearest µs\nfunc durationToMicrosecondInt64(d time.Duration) int64 {\n\treturn d.Nanoseconds() / 1e3\n}\n\n// protoToKeyValue converts a repeated set of proto key values\n// to a map of keys => values\nfunc protoToKeyValue(p []*traceproto.KeyValue) map[string]string {\n\tret := make(map[string]string)\n\tfor _, kv := range p {\n\t\tif kv == nil {\n\t\t\tcontinue\n\t\t}\n\t\tret[kv.Key] = kv.Value\n\t}\n\treturn ret\n}\n\n// keyValueToProto converts a map of keys => values to a repeated set\n// of proto key values\nfunc keyValueToProto(m map[string]string) []*traceproto.KeyValue {\n\tret := 
make([]*traceproto.KeyValue, 0, len(m))\n\tfor k, v := range m {\n\t\tkv := &traceproto.KeyValue{\n\t\t\tKey:   k,\n\t\t\tValue: v,\n\t\t}\n\t\tret = append(ret, kv)\n\t}\n\treturn ret\n}\n\n// AnnotationsToProto converts a slice of domain annotations to our proto format\nfunc AnnotationsToProto(a []*Annotation) []*traceproto.Annotation {\n\tret := make([]*traceproto.Annotation, 0, len(a))\n\tfor _, v := range a {\n\t\tret = append(ret, AnnotationToProto(v))\n\t}\n\treturn ret\n}\n\n// AnnotationToProto converts a domain annotation to our proto format\nfunc AnnotationToProto(a *Annotation) *traceproto.Annotation {\n\tif a == nil {\n\t\treturn &traceproto.Annotation{}\n\t}\n\n\treturn &traceproto.Annotation{\n\t\tTraceId:  a.TraceId,\n\t\tSpanId:   a.SpanId,\n\t\tParentId: a.ParentSpanId,\n\t\tType:     annotationTypeToProto(a.AnnotationType),\n\t\tAsync:    a.Async,\n\n\t\tTimestamp: timeToMicrosecondInt64(a.Timestamp),\n\t\tDuration:  durationToMicrosecondInt64(a.Duration),\n\n\t\tHostname:    a.Hostname,\n\t\tOrigin:      a.Origin,\n\t\tDestination: a.Destination,\n\t\tPayload:     a.Payload,\n\t\tKeyValue:    keyValueToProto(a.KeyValue),\n\t}\n}\n"
  },
  {
    "path": "phosphor/memorystore.go",
    "content": "package phosphor\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com/cihub/seelog\"\n)\n\ntype MemoryStore struct {\n\tsync.RWMutex\n\ttraces map[string]*Trace\n}\n\n// NewMemoryStore initialises and returns a new MemoryStore\nfunc NewMemoryStore() *MemoryStore {\n\ts := &MemoryStore{\n\t\ttraces: make(map[string]*Trace),\n\t}\n\n\t// run stats worker\n\tgo s.statsLoop()\n\n\treturn s\n}\n\n// ReadTrace retrieves a full Trace, composed of Annotations from the store by ID\nfunc (s *MemoryStore) ReadTrace(id string) (*Trace, error) {\n\tif s == nil {\n\t\treturn nil, ErrStoreNotInitialised\n\t}\n\n\ts.RLock()\n\tdefer s.RUnlock()\n\n\treturn s.traces[id], nil\n}\n\n// StoreAnnotation into the store, if the trace doesn't not already exist\n// this will be created for the global trace ID\nfunc (s *MemoryStore) StoreAnnotation(a *Annotation) error {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tif s == nil {\n\t\treturn ErrStoreNotInitialised\n\t}\n\tif a == nil {\n\t\treturn ErrInvalidAnnotation\n\t}\n\tif a.TraceId == \"\" {\n\t\treturn ErrInvalidTraceId\n\t}\n\n\t// Load our current trace\n\tt := s.traces[a.TraceId]\n\n\t// Initialise a new trace if we don't have it already\n\tif t == nil {\n\t\tt = NewTrace()\n\t}\n\n\t// Add the new annotation to this\n\tt.AppendAnnotation(a)\n\n\t// Store it back\n\ts.traces[a.TraceId] = t\n\n\treturn nil\n}\n\n// statsLoop loops and outputs stats every 5 seconds\nfunc (s *MemoryStore) statsLoop() {\n\n\ttick := time.NewTicker(5 * time.Second)\n\n\t// @todo listen for shutdown, stop ticker and exit cleanly\n\tfor {\n\t\t<-tick.C // block until tick\n\n\t\ts.printStats()\n\t}\n}\n\n// printStats about the status of the memorystore to stdout\nfunc (s *MemoryStore) printStats() {\n\n\t// Get some data while under the mutex\n\ts.RLock()\n\tcount := len(s.traces)\n\ts.RUnlock()\n\n\t// Separate processing and logging outside of mutex\n\tlog.Infof(\"[MemoryStore] Traces stored: %v\", count)\n}\n"
  },
  {
    "path": "phosphor/options.go",
    "content": "package phosphor\n\nimport (\n\t\"crypto/md5\"\n\t\"hash/crc32\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n)\n\ntype Options struct {\n\t// basic options\n\tID           int64  `flag:\"worker-id\" cfg:\"id\"`\n\tVerbose      bool   `flag:\"verbose\"`\n\tHTTPAddress  string `flag:\"http-address\"`\n\tHTTPSAddress string `flag:\"https-address\"`\n\n\t// NSQ Transport options\n\tNSQLookupdHTTPAddresses []string `flag:\"nsqlookupd-http-address\"`\n\tNSQDHTTPAddresses       []string `flag:\"nsqd-http-address\"`\n\tNSQTopicName            string   `flag:\"nsq-topic\"`\n\tNSQChannelName          string   `flag:\"nsq-channel\"`\n\tNSQMaxInflight          int      `flag:\"nsq-max-inflight\"`\n\tNSQNumHandlers          int      `flag:\"nsq-num-handlers\"`\n}\n\nfunc NewOptions() *Options {\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\th := md5.New()\n\tio.WriteString(h, hostname)\n\tdefaultID := int64(crc32.ChecksumIEEE(h.Sum(nil)) % 1024)\n\n\treturn &Options{\n\t\tID: defaultID,\n\n\t\tHTTPAddress: \"0.0.0.0:7750\",\n\n\t\tNSQTopicName:   \"phosphor\",\n\t\tNSQChannelName: \"phosphor-server\",\n\t\tNSQMaxInflight: 200,\n\t\tNSQNumHandlers: 10,\n\t}\n}\n"
  },
  {
    "path": "phosphor/phosphor.go",
    "content": "package phosphor\n\nimport (\n\t\"net/http\"\n\n\t\"golang.org/x/net/context\"\n\n\tlog \"github.com/cihub/seelog\"\n)\n\ntype Phosphor struct {\n\topts  *Options\n\tStore Store\n\n\texitChan chan struct{}\n}\n\nfunc New(opts *Options) *Phosphor {\n\treturn &Phosphor{\n\t\topts: opts,\n\t\t// Store: opts.Store,\n\n\t\texitChan: make(chan struct{}),\n\t}\n}\n\nfunc (p *Phosphor) Run() {\n\tlog.Infof(\"Phosphor starting up\")\n\tdefer log.Flush()\n\n\t// Store a reference to phosphor in our context which we can pass\n\t// to other areas of the application, eg the HTTP api\n\tctx := context.Background()\n\tctx = context.WithValue(ctx, \"phosphor\", p)\n\n\t// Initialise a persistent store\n\t// if p.Store == nil {\n\tp.Store = NewMemoryStore()\n\t// }\n\n\t// Initialise trace ingestion\n\tgo p.RunIngester()\n\n\t// Set up API and serve requests\n\thttp.HandleFunc(\"/\", Index)\n\thttp.HandleFunc(\"/trace\", TraceLookup(ctx))\n\tgo http.ListenAndServe(p.opts.HTTPAddress, nil)\n}\n\nfunc (p *Phosphor) Exit() {\n\tlog.Infof(\"Phosphor exiting\")\n\tselect {\n\tcase <-p.exitChan: // check if already closed\n\tdefault:\n\t\tclose(p.exitChan)\n\t}\n}\n"
  },
  {
    "path": "phosphor/store.go",
    "content": "package phosphor\n\nimport \"errors\"\n\ntype Store interface {\n\tReadTrace(id string) (*Trace, error)\n\tStoreAnnotation(a *Annotation) error\n}\n\nvar (\n\tErrStoreNotInitialised = errors.New(\"Store is not initialised\")\n\tErrInvalidAnnotation   = errors.New(\"Annotation is invalid\")\n\tErrInvalidTrace        = errors.New(\"Trace is invalid\")\n\tErrInvalidTraceId      = errors.New(\"TraceId is invalid\")\n)\n"
  },
  {
    "path": "phosphord/README.md",
    "content": "# PhosphorD\n\nPhosphorD is a local forwarder, like StatsD, which receives traces from the Phosphor client, and forwards to the [Phosphor server](https://github.com/mondough/phosphor/tree/master/phosphor).\n\nCurrently this receives Traces over UDP, which prevents clients blocking, but is reasonably reliable on a local machine. In the event this blocks, traces will be dropped and lost.\n\nA future improvement would make this configurable to read from local files, mirroring the behaviour of Dapper Daemons as described in the [Google Dapper](https://research.google.com/pubs/pub36356.html) paper.\n\n## Usage\n\n```\n  -buffer-size int\n    \tset the maximum number of traces buffered per worker before batch sending (default 200)\n  -flush-interval int\n    \tset the maximum flush interval in ms (default 2000)\n  -nsq-topic string\n    \tNSQ topic name to recieve traces from (default \"phosphor\")\n  -nsqd-tcp-address value\n    \tnsqd TCP address (may be given multiple times)\n  -num-forwarders int\n    \tset the number of workers which buffer and forward traces (default 20)\n  -udp-address string\n    \t<addr>:<port> to listen for UDP traces (default \"0.0.0.0:7760\")\n  -verbose\n    \tenable verbose logging\n  -version\n    \tprint version string\n```\n"
  },
  {
    "path": "phosphord/forwarder.go",
    "content": "package phosphord\n\nimport (\n\t\"encoding/json\"\n\t\"time\"\n\n\tlog \"github.com/cihub/seelog\"\n\t\"github.com/golang/protobuf/proto\"\n\n\tpb \"github.com/mondough/phosphor/proto\"\n)\n\nfunc (p *PhosphorD) forward(id int) {\n\n\tlog.Debugf(\"[Forwarder %v] started\", id)\n\n\tvar (\n\t\tb           []byte\n\t\ti           int\n\t\tdecoded     *pb.Annotation\n\t\tjs          []byte\n\t\tbuf         = make([][]byte, 0, p.opts.BufferSize)\n\t\tmetricsTick = time.NewTicker(5 * time.Second)\n\t\ttimeoutTick = time.NewTicker(time.Duration(p.opts.FlushInterval) * time.Millisecond)\n\t)\n\n\tfor {\n\t\tselect {\n\t\tcase <-p.exitChan:\n\t\t\treturn\n\t\tcase b = <-p.traceChan:\n\t\t\ti++\n\n\t\t\t// Log the frame if we're in verbose mode\n\t\t\tif p.opts.Verbose {\n\t\t\t\tdecoded = &pb.Annotation{}\n\t\t\t\tif err := proto.Unmarshal(b, decoded); err != nil {\n\t\t\t\t\tlog.Warnf(\"[Forwarder %v] Couldn't decode trace frame\", id)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tjs, _ = json.Marshal(decoded)\n\t\t\t\tlog.Tracef(\"[Forwarder %v] Received message: %s\", id, string(js))\n\t\t\t}\n\n\t\t\t// Add message to our buffer\n\t\t\tbuf = append(buf, b)\n\n\t\t\t// Forward on if we're at our buffer size\n\t\t\tif len(buf) >= p.opts.BufferSize {\n\t\t\t\tp.sendTraces(id, &buf)\n\t\t\t}\n\t\tcase <-timeoutTick.C:\n\t\t\tp.sendTraces(id, &buf)\n\t\tcase <-metricsTick.C:\n\t\t\tlog.Debugf(\"[Forwarder %v] Processed %v messages\", id, i)\n\t\t}\n\t}\n}\n\nfunc (p *PhosphorD) sendTraces(id int, buf *[][]byte) error {\n\t// Don't publish empty buffers\n\tif buf == nil || len(*buf) == 0 {\n\t\treturn nil\n\t}\n\n\t// Attempt to publish\n\tlog.Debugf(\"[Forwarder %v] Sending %v traces\", id, len(*buf))\n\tif err := p.tr.MultiPublish(*buf); err != nil {\n\t\t// we return an error here, but currently ignore it\n\t\t// therefore the behaviour will be reattempting to republish the\n\t\t// buffer when the next trace arrives to this forwarder\n\t\treturn 
err\n\t}\n\n\t// Empty the buffer on success\n\t*buf = nil\n\n\treturn nil\n}\n"
  },
  {
    "path": "phosphord/options.go",
    "content": "package phosphord\n\ntype Options struct {\n\t// basic options\n\tVerbose       bool   `flag:\"verbose\"`\n\tUDPAddress    string `flag:\"udp-address\"`\n\tNumForwarders int    `flag:\"num-forwarders\"`\n\tBufferSize    int    `flag:\"buffer-size\"`\n\tFlushInterval int    `flag:\"flush-interval\"`\n\n\t// NSQ Transport options\n\tNSQDTCPAddresses []string `flag:\"nsqd-tcp-address\"`\n\tNSQTopicName     string   `flag:\"nsq-topic\"`\n\tNSQMaxInflight   int\n\tNSQNumHandlers   int\n}\n\nfunc NewOptions() *Options {\n\treturn &Options{\n\t\tUDPAddress:    \"0.0.0.0:7760\",\n\t\tNumForwarders: 20,\n\t\tBufferSize:    200,\n\t\tFlushInterval: 2000,\n\n\t\tNSQTopicName:   \"phosphor\",\n\t\tNSQMaxInflight: 200,\n\t\tNSQNumHandlers: 10,\n\t}\n}\n"
  },
  {
    "path": "phosphord/phosphord.go",
    "content": "package phosphord\n\nimport (\n\t\"bytes\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n\n\tlog \"github.com/cihub/seelog\"\n\n\t\"github.com/mondough/phosphor/phosphord/transport\"\n)\n\nconst (\n\tUDP = \"udp\"\n)\n\nvar (\n\tpacketSize = 65536 - 8 - 20 // 8-byte UDP header, 20-byte IP header\n)\n\ntype PhosphorD struct {\n\topts *Options\n\ttr   transport.Transport\n\n\ttraceChan chan []byte\n\n\texitChan chan struct{}\n}\n\nfunc New(opts *Options) *PhosphorD {\n\t// Initialise our transport\n\t// TODO ensure this doesn't connect until we Run()\n\ttr, err := transport.NewNSQTransport(opts.NSQTopicName, opts.NSQDTCPAddresses)\n\tif err != nil {\n\t\tlog.Criticalf(err.Error())\n\t\tos.Exit(1)\n\t}\n\n\treturn &PhosphorD{\n\t\topts:      opts,\n\t\ttr:        tr,\n\t\ttraceChan: make(chan []byte),\n\n\t\texitChan: make(chan struct{}),\n\t}\n}\n\nfunc (p *PhosphorD) Run() {\n\tlog.Infof(\"PhosphorD started at %v using %v CPUs\", time.Now(), runtime.NumCPU())\n\n\t// Fire up a number of forwarders to process inbound messages\n\tlog.Infof(\"Starting %v forwarders with buffer size of %v\", p.opts.NumForwarders, p.opts.BufferSize)\n\tfor i := 0; i < p.opts.NumForwarders; i++ {\n\t\tgo p.forward(i)\n\t}\n\n\t// Bind and listen to UDP traffic\n\tgo p.listen()\n}\n\n// Exit and shut down\nfunc (p *PhosphorD) Exit() {\n\tlog.Infof(\"PhosphorD exiting\")\n\tselect {\n\tcase <-p.exitChan: // check if already closed\n\tdefault:\n\t\tclose(p.exitChan)\n\t}\n}\n\n// listen on a UDP socket for trace frames\nfunc (p *PhosphorD) listen() {\n\n\t// Resolve bind address\n\taddress, err := net.ResolveUDPAddr(UDP, p.opts.UDPAddress)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to resolve address: %s\", err.Error())\n\t\treturn\n\t}\n\n\t// Take the resolved address and attempt to listen on the UDP socket\n\tlistener, err := net.ListenUDP(UDP, address)\n\tif err != nil {\n\t\tlog.Errorf(\"ListenUDP error: %s\", err.Error())\n\t\treturn\n\t}\n\tdefer 
listener.Close()\n\n\t// Listen loop\n\tlog.Infof(\"Listening on %s for UDP trace frames\", address.String())\n\tfor {\n\t\tmessage := make([]byte, packetSize)\n\t\tn, _, err := listener.ReadFrom(message)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tbuf := bytes.NewBuffer(message[0:n])\n\t\t// log.Infof(\"Packet received from %s: %s\", remaddr, string(message[0:n]))\n\n\t\t// Attempt to push into our channel to be processed by a worker\n\t\tselect {\n\n\t\t// Successfully write inbound message to queue\n\t\tcase p.traceChan <- buf.Bytes():\n\n\t\t// Stop listening and shut down\n\t\tcase <-p.exitChan:\n\t\t\treturn\n\n\t\t// Drop message to prevent blocking\n\t\tdefault:\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "phosphord/test/test.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\tpb \"github.com/mondough/phosphor/proto\"\n)\n\nconst (\n\tMAX_PACKET_SIZE = 1500 - 8 - 20 // 8-byte UDP header, 20-byte IP header\n)\n\nfunc main() {\n\n\t// Make example trace frame\n\tt := &pb.Annotation{\n\t\tTraceId:     \"aasldjaskjdlsakjdkasjdklasjdlasjdkljdas\",\n\t\tSpanId:      \"8yf8sdg76sg897b98fbuys8b9s6rvs6ducghkfhi27tuw\",\n\t\tParentId:    \"97as8d7s9a7a7dv32hrkqehfkuh23hq8d7h4g7iygs7ih\",\n\t\tType:        pb.AnnotationType_CLIENT_SEND,\n\t\tTimestamp:   time.Now().UnixNano() / 1e3,\n\t\tDuration:    1231312,\n\t\tHostname:    \"somehostname\",\n\t\tOrigin:      \"some.api\",\n\t\tDestination: \"some.service\",\n\t\tPayload:     `{\"boop\":123}`,\n\t}\n\n\t// Marshal to bytes\n\n\tb, err := proto.Marshal(t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Printf(\"Encoded: %s\\n\", string(b))\n\tfmt.Printf(\"Encoded bytes: %v\\n\", b)\n\n\t// Send via UDP!\n\n\t// Get a conn\n\tc, err := net.DialTimeout(\"udp\", \"localhost:7760\", time.Second)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Write into the connection\n\tvar i int\n\n\tfor j := 0; j < 20; j++ {\n\t\tfor i = 0; i < 500; i++ {\n\t\t\t_, err := c.Write([]byte(b))\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n\n\tfmt.Println(\"Sent\", i, \"messages\")\n\n\t// fmt.Println(\"Sent %v bytes\", n)\n}\n"
  },
  {
    "path": "phosphord/transport/nsq.go",
    "content": "package transport\n\nimport (\n\t\"errors\"\n\t\"math/rand\"\n\n\tnsq \"github.com/bitly/go-nsq\"\n\tlog \"github.com/cihub/seelog\"\n\n\t\"github.com/mondough/phosphor/internal/util\"\n)\n\nvar (\n\tErrPublishFailure    = errors.New(\"Failed to publish to NSQD\")\n\tErrNoConfiguredNodes = errors.New(\"No NSQD nodes are configured\")\n)\n\n// NewNSQTransport initialises a Transport over NSQ\nfunc NewNSQTransport(topic string, nsqdTCPAddrs util.StringArray) (Transport, error) {\n\n\t// Currently using default config\n\tcfg := nsq.NewConfig()\n\n\t// Create a producer for each nsqd node provided\n\tproducers := make(map[string]*nsq.Producer)\n\tproducersIndex := make([]*nsq.Producer, 0, len(nsqdTCPAddrs))\n\tfor _, addr := range nsqdTCPAddrs {\n\t\tproducer, err := nsq.NewProducer(addr, cfg)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"failed to create nsq.Producer - %s\", err)\n\t\t}\n\t\tproducers[addr] = producer\n\t\tproducersIndex = append(producersIndex, producers[addr])\n\t}\n\n\treturn &NSQPublisher{\n\t\ttopic:          topic,\n\t\tproducers:      producers,\n\t\tproducersIndex: producersIndex,\n\t}, nil\n}\n\ntype NSQPublisher struct {\n\ttopic          string\n\tproducers      map[string]*nsq.Producer\n\tproducersIndex []*nsq.Producer\n}\n\nfunc (p *NSQPublisher) MultiPublish(body [][]byte) error {\n\n\tif len(p.producers) == 0 {\n\t\treturn ErrNoConfiguredNodes\n\t}\n\n\t// Round robin, from a random starting position\n\ti := rand.Intn(len(p.producers)) - 1\n\n\t// Attempt up to our number of configured nodes\n\tfor attempt := 0; attempt < len(p.producers); attempt++ {\n\n\t\t// Move to next host, or cycle back around\n\t\ti++\n\t\tif i >= len(p.producers) {\n\t\t\ti = 0\n\t\t}\n\n\t\t// Attempt to publish\n\t\tpd := p.producersIndex[i]\n\t\tif err := pd.MultiPublish(p.topic, body); err == nil {\n\t\t\t// success!\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// We've run out of nodes, and not managed to publish\n\treturn ErrPublishFailure\n}\n"
  },
  {
    "path": "phosphord/transport/transport.go",
    "content": "package transport\n\ntype Transport interface {\n\tMultiPublish(body [][]byte) error\n}\n"
  },
  {
    "path": "proto/trace.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: github.com/mondough/phosphor/proto/trace.proto\n// DO NOT EDIT!\n\n/*\nPackage traceproto is a generated protocol buffer package.\n\nIt is generated from these files:\n\tgithub.com/mondough/phosphor/proto/trace.proto\n\nIt has these top-level messages:\n\tAnnotation\n\tKeyValue\n*/\npackage traceproto\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype AnnotationType int32\n\nconst (\n\tAnnotationType_UNKNOWN     AnnotationType = 0\n\tAnnotationType_CLIENT_SEND AnnotationType = 1\n\tAnnotationType_CLIENT_RECV AnnotationType = 2\n\tAnnotationType_SERVER_RECV AnnotationType = 3\n\tAnnotationType_SERVER_SEND AnnotationType = 4\n\tAnnotationType_TIMEOUT     AnnotationType = 5\n\tAnnotationType_ANNOTATION  AnnotationType = 6\n)\n\nvar AnnotationType_name = map[int32]string{\n\t0: \"UNKNOWN\",\n\t1: \"CLIENT_SEND\",\n\t2: \"CLIENT_RECV\",\n\t3: \"SERVER_RECV\",\n\t4: \"SERVER_SEND\",\n\t5: \"TIMEOUT\",\n\t6: \"ANNOTATION\",\n}\nvar AnnotationType_value = map[string]int32{\n\t\"UNKNOWN\":     0,\n\t\"CLIENT_SEND\": 1,\n\t\"CLIENT_RECV\": 2,\n\t\"SERVER_RECV\": 3,\n\t\"SERVER_SEND\": 4,\n\t\"TIMEOUT\":     5,\n\t\"ANNOTATION\":  6,\n}\n\nfunc (x AnnotationType) String() string {\n\treturn proto.EnumName(AnnotationType_name, int32(x))\n}\n\ntype Annotation struct {\n\t// The ID of the trace this annotation is a component of\n\tTraceId string `protobuf:\"bytes,1,opt,name=trace_id\" json:\"trace_id,omitempty\"`\n\t// The span this trace corresponds to, in the case this\n\t// is representing a service (REQ/REP) call\n\tSpanId string `protobuf:\"bytes,2,opt,name=span_id\" json:\"span_id,omitempty\"`\n\t// The parent span this trace corresponds to, allowing us\n\t// to correlate trace frames and reconstruct the request\n\tParentId 
string `protobuf:\"bytes,3,opt,name=parent_id\" json:\"parent_id,omitempty\"`\n\t// The type of annotation we're capturing\n\tType AnnotationType `protobuf:\"varint,4,opt,name=type,enum=traceproto.AnnotationType\" json:\"type,omitempty\"`\n\t// Flag to indicate this is an asynchronous span, which will not have a\n\t// response - eg. just client send and server recv annotations\n\tAsync bool `protobuf:\"varint,5,opt,name=async\" json:\"async,omitempty\"`\n\t// Time since the epoch in microseconds\n\tTimestamp int64 `protobuf:\"varint,6,opt,name=timestamp\" json:\"timestamp,omitempty\"`\n\t// Duration in microseconds\n\t// This should only be used to measure time on the same node\n\t// eg. the duration of service / rpc calls\n\tDuration int64 `protobuf:\"varint,7,opt,name=duration\" json:\"duration,omitempty\"`\n\t// Machine hostname, container name etc\n\tHostname string `protobuf:\"bytes,8,opt,name=hostname\" json:\"hostname,omitempty\"`\n\t// Origin of this annotation, likely a service or application for a RPC\n\tOrigin string `protobuf:\"bytes,9,opt,name=origin\" json:\"origin,omitempty\"`\n\t// Destination of this annotations action\n\t// eg. the service which a request was destined for\n\t// likely not set for annotations\n\tDestination string `protobuf:\"bytes,10,opt,name=destination\" json:\"destination,omitempty\"`\n\t// Payload as a string - eg. 
JSON encoded\n\tPayload string `protobuf:\"bytes,11,opt,name=payload\" json:\"payload,omitempty\"`\n\t// Repeated series of key value fields for arbitrary data\n\tKeyValue []*KeyValue `protobuf:\"bytes,12,rep,name=key_value\" json:\"key_value,omitempty\"`\n}\n\nfunc (m *Annotation) Reset()         { *m = Annotation{} }\nfunc (m *Annotation) String() string { return proto.CompactTextString(m) }\nfunc (*Annotation) ProtoMessage()    {}\n\nfunc (m *Annotation) GetKeyValue() []*KeyValue {\n\tif m != nil {\n\t\treturn m.KeyValue\n\t}\n\treturn nil\n}\n\ntype KeyValue struct {\n\tKey   string `protobuf:\"bytes,1,opt,name=key\" json:\"key,omitempty\"`\n\tValue string `protobuf:\"bytes,2,opt,name=value\" json:\"value,omitempty\"`\n}\n\nfunc (m *KeyValue) Reset()         { *m = KeyValue{} }\nfunc (m *KeyValue) String() string { return proto.CompactTextString(m) }\nfunc (*KeyValue) ProtoMessage()    {}\n\nfunc init() {\n\tproto.RegisterEnum(\"traceproto.AnnotationType\", AnnotationType_name, AnnotationType_value)\n}\n"
  },
  {
    "path": "proto/trace.proto",
    "content": "syntax=\"proto3\";\n\npackage traceproto;\n\nmessage Annotation {\n  // The ID of the trace this annotation is a component of\n  string trace_id = 1;\n\n  // The span this trace corresponds to, in the case this\n  // is representing a service (REQ/REP) call\n  string span_id = 2;\n\n  // The parent span this trace corresponds to, allowing us\n  // to correlate trace frames and reconstruct the request\n  string parent_id = 3;\n\n  // The type of annotation we're capturing\n  AnnotationType type = 4;\n\n  // Flag to indicate this is an asynchronous span, which will not have a\n  // response - eg. just client send and server recv annotations\n  bool async = 5;\n\n  // Time since the epoch in microseconds\n  int64 timestamp = 6;\n\n  // Duration in microseconds\n  // This should only be used to measure time on the same node\n  // eg. the duration of service / rpc calls\n  int64 duration = 7;\n\n  // Machine hostname, container name etc\n  string hostname = 8;\n\n  // Origin of this annotation, likely a service or application for a RPC\n  string origin = 9;\n\n  // Destination of this annotations action\n  // eg. the service which a request was destined for\n  // likely not set for annotations\n  string destination = 10;\n\n  // Payload as a string - eg. JSON encoded\n  string payload = 11;\n\n  // Repeated series of key value fields for arbitrary data\n  repeated KeyValue key_value = 12;\n}\n\nenum AnnotationType {\n\tUNKNOWN = 0;\n\n\tCLIENT_SEND = 1;\n\tCLIENT_RECV = 2;\n\tSERVER_RECV = 3;\n\tSERVER_SEND = 4;\n\n\tTIMEOUT = 5;\n\n\tANNOTATION = 6;\n}\n\nmessage KeyValue {\n\tstring key = 1;\n\tstring value = 2;\n}\n"
  },
  {
    "path": "script/buildprotobufs.sh",
    "content": "#!/bin/bash\n\n# Basic path locations\nROOT=$(cd $(dirname -- \"$0\" ) && cd .. && pwd)\nMESSAGEPATH=${ROOT}/proto\n\n# SRCPATH is the path to our src directory - everything from here is fully qualified\n# This depends on your storing your code in your GOPATH\n# eg. xxx/github.com/mondough/phosphor\nSRCPATH=$(cd ${ROOT}/../../.. && pwd)\n\n# Cakes are important. and delicious. and should be given out for success.\nfunction dispatchCake() {\n\tprintf \"\\n            \\033[1;33m*\\033[0m  \\033[1;33m*\\033[0m  \\033[1;33m*\\033[0m             \\n\"\n\tprintf \"           \\033[1;33m*\\033[0m\\033[0;31m|\\033[0m_\\033[1;33m*\\033[0m\\033[0;31m|\\033[0m_\\033[1;33m*\\033[0m\\033[0;31m|\\033[0m_\\033[1;33m*\\033[0m           \\n\"\n\tprintf \"       .-'\\`\\033[0;31m|\\033[0m  \\033[0;31m|\\033[0m  \\033[0;31m|\\033[0m  \\033[0;31m|\\033[0m\\`'-.       \\n\"\n\tprintf \"       |\\`-............-'|       \\n\"\n\tprintf \"       |                |       \\n\"\n\tprintf \"       \\   _  .-.   _   /       \\n\"\n\tprintf \"     ,-|'-' '-'  '-' '-'|-,     \\n\"\n\tprintf \"   /\\`  \\._            _./  \\`\\   \\n\"\n\tprintf \"   '._    \\`\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\`    _.'\\n\"\n\tprintf \"     \\`''--..........--''\\`       \\n\\n\"\n\tprintf \"        \\033[1;5;7;32m GREAT SUCCESS! \\033[0m\\n\\n\"\n\tprintf \"\\n\\n\"\n}\n\n# Show which protobufs were found\nprintf \"\\nLocating protobufs...\\n\"\nfind ${MESSAGEPATH} -name '*.proto' -exec echo {} \\;\necho \"\"\n\n# Clean out current protos\nfind ${MESSAGEPATH} -name '*.pb.go' | xargs rm -f\n\n# Try to rebuild all the things\necho \"Generating Go protobuf classes...\"\nfind $MESSAGEPATH -name '*.proto' -exec protoc -I${SRCPATH} --go_out=${SRCPATH} {} \\;\nprintf \"Complete\\n\\n\"\n\n# GREAT SUCCESS\ndispatchCake\n"
  },
  {
    "path": "vendor/code.google.com/p/snappy-go/snappy/decode.go",
    "content": "// Copyright 2011 The Snappy-Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage snappy\n\nimport (\n\t\"encoding/binary\"\n\t\"errors\"\n)\n\n// ErrCorrupt reports that the input is invalid.\nvar ErrCorrupt = errors.New(\"snappy: corrupt input\")\n\n// DecodedLen returns the length of the decoded block.\nfunc DecodedLen(src []byte) (int, error) {\n\tv, _, err := decodedLen(src)\n\treturn v, err\n}\n\n// decodedLen returns the length of the decoded block and the number of bytes\n// that the length header occupied.\nfunc decodedLen(src []byte) (blockLen, headerLen int, err error) {\n\tv, n := binary.Uvarint(src)\n\tif n == 0 {\n\t\treturn 0, 0, ErrCorrupt\n\t}\n\tif uint64(int(v)) != v {\n\t\treturn 0, 0, errors.New(\"snappy: decoded block is too large\")\n\t}\n\treturn int(v), n, nil\n}\n\n// Decode returns the decoded form of src. The returned slice may be a sub-\n// slice of dst if dst was large enough to hold the entire decoded block.\n// Otherwise, a newly allocated slice will be returned.\n// It is valid to pass a nil dst.\nfunc Decode(dst, src []byte) ([]byte, error) {\n\tdLen, s, err := decodedLen(src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(dst) < dLen {\n\t\tdst = make([]byte, dLen)\n\t}\n\n\tvar d, offset, length int\n\tfor s < len(src) {\n\t\tswitch src[s] & 0x03 {\n\t\tcase tagLiteral:\n\t\t\tx := uint(src[s] >> 2)\n\t\t\tswitch {\n\t\t\tcase x < 60:\n\t\t\t\ts += 1\n\t\t\tcase x == 60:\n\t\t\t\ts += 2\n\t\t\t\tif s > len(src) {\n\t\t\t\t\treturn nil, ErrCorrupt\n\t\t\t\t}\n\t\t\t\tx = uint(src[s-1])\n\t\t\tcase x == 61:\n\t\t\t\ts += 3\n\t\t\t\tif s > len(src) {\n\t\t\t\t\treturn nil, ErrCorrupt\n\t\t\t\t}\n\t\t\t\tx = uint(src[s-2]) | uint(src[s-1])<<8\n\t\t\tcase x == 62:\n\t\t\t\ts += 4\n\t\t\t\tif s > len(src) {\n\t\t\t\t\treturn nil, ErrCorrupt\n\t\t\t\t}\n\t\t\t\tx = uint(src[s-3]) | uint(src[s-2])<<8 | 
uint(src[s-1])<<16\n\t\t\tcase x == 63:\n\t\t\t\ts += 5\n\t\t\t\tif s > len(src) {\n\t\t\t\t\treturn nil, ErrCorrupt\n\t\t\t\t}\n\t\t\t\tx = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24\n\t\t\t}\n\t\t\tlength = int(x + 1)\n\t\t\tif length <= 0 {\n\t\t\t\treturn nil, errors.New(\"snappy: unsupported literal length\")\n\t\t\t}\n\t\t\tif length > len(dst)-d || length > len(src)-s {\n\t\t\t\treturn nil, ErrCorrupt\n\t\t\t}\n\t\t\tcopy(dst[d:], src[s:s+length])\n\t\t\td += length\n\t\t\ts += length\n\t\t\tcontinue\n\n\t\tcase tagCopy1:\n\t\t\ts += 2\n\t\t\tif s > len(src) {\n\t\t\t\treturn nil, ErrCorrupt\n\t\t\t}\n\t\t\tlength = 4 + int(src[s-2])>>2&0x7\n\t\t\toffset = int(src[s-2])&0xe0<<3 | int(src[s-1])\n\n\t\tcase tagCopy2:\n\t\t\ts += 3\n\t\t\tif s > len(src) {\n\t\t\t\treturn nil, ErrCorrupt\n\t\t\t}\n\t\t\tlength = 1 + int(src[s-3])>>2\n\t\t\toffset = int(src[s-2]) | int(src[s-1])<<8\n\n\t\tcase tagCopy4:\n\t\t\treturn nil, errors.New(\"snappy: unsupported COPY_4 tag\")\n\t\t}\n\n\t\tend := d + length\n\t\tif offset > d || end > len(dst) {\n\t\t\treturn nil, ErrCorrupt\n\t\t}\n\t\tfor ; d < end; d++ {\n\t\t\tdst[d] = dst[d-offset]\n\t\t}\n\t}\n\tif d != dLen {\n\t\treturn nil, ErrCorrupt\n\t}\n\treturn dst[:d], nil\n}\n"
  },
  {
    "path": "vendor/code.google.com/p/snappy-go/snappy/encode.go",
    "content": "// Copyright 2011 The Snappy-Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage snappy\n\nimport (\n\t\"encoding/binary\"\n)\n\n// We limit how far copy back-references can go, the same as the C++ code.\nconst maxOffset = 1 << 15\n\n// emitLiteral writes a literal chunk and returns the number of bytes written.\nfunc emitLiteral(dst, lit []byte) int {\n\ti, n := 0, uint(len(lit)-1)\n\tswitch {\n\tcase n < 60:\n\t\tdst[0] = uint8(n)<<2 | tagLiteral\n\t\ti = 1\n\tcase n < 1<<8:\n\t\tdst[0] = 60<<2 | tagLiteral\n\t\tdst[1] = uint8(n)\n\t\ti = 2\n\tcase n < 1<<16:\n\t\tdst[0] = 61<<2 | tagLiteral\n\t\tdst[1] = uint8(n)\n\t\tdst[2] = uint8(n >> 8)\n\t\ti = 3\n\tcase n < 1<<24:\n\t\tdst[0] = 62<<2 | tagLiteral\n\t\tdst[1] = uint8(n)\n\t\tdst[2] = uint8(n >> 8)\n\t\tdst[3] = uint8(n >> 16)\n\t\ti = 4\n\tcase int64(n) < 1<<32:\n\t\tdst[0] = 63<<2 | tagLiteral\n\t\tdst[1] = uint8(n)\n\t\tdst[2] = uint8(n >> 8)\n\t\tdst[3] = uint8(n >> 16)\n\t\tdst[4] = uint8(n >> 24)\n\t\ti = 5\n\tdefault:\n\t\tpanic(\"snappy: source buffer is too long\")\n\t}\n\tif copy(dst[i:], lit) != len(lit) {\n\t\tpanic(\"snappy: destination buffer is too short\")\n\t}\n\treturn i + len(lit)\n}\n\n// emitCopy writes a copy chunk and returns the number of bytes written.\nfunc emitCopy(dst []byte, offset, length int) int {\n\ti := 0\n\tfor length > 0 {\n\t\tx := length - 4\n\t\tif 0 <= x && x < 1<<3 && offset < 1<<11 {\n\t\t\tdst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1\n\t\t\tdst[i+1] = uint8(offset)\n\t\t\ti += 2\n\t\t\tbreak\n\t\t}\n\n\t\tx = length\n\t\tif x > 1<<6 {\n\t\t\tx = 1 << 6\n\t\t}\n\t\tdst[i+0] = uint8(x-1)<<2 | tagCopy2\n\t\tdst[i+1] = uint8(offset)\n\t\tdst[i+2] = uint8(offset >> 8)\n\t\ti += 3\n\t\tlength -= x\n\t}\n\treturn i\n}\n\n// Encode returns the encoded form of src. 
The returned slice may be a sub-\n// slice of dst if dst was large enough to hold the entire encoded block.\n// Otherwise, a newly allocated slice will be returned.\n// It is valid to pass a nil dst.\nfunc Encode(dst, src []byte) ([]byte, error) {\n\tif n := MaxEncodedLen(len(src)); len(dst) < n {\n\t\tdst = make([]byte, n)\n\t}\n\n\t// The block starts with the varint-encoded length of the decompressed bytes.\n\td := binary.PutUvarint(dst, uint64(len(src)))\n\n\t// Return early if src is short.\n\tif len(src) <= 4 {\n\t\tif len(src) != 0 {\n\t\t\td += emitLiteral(dst[d:], src)\n\t\t}\n\t\treturn dst[:d], nil\n\t}\n\n\t// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.\n\tconst maxTableSize = 1 << 14\n\tshift, tableSize := uint(32-8), 1<<8\n\tfor tableSize < maxTableSize && tableSize < len(src) {\n\t\tshift--\n\t\ttableSize *= 2\n\t}\n\tvar table [maxTableSize]int\n\n\t// Iterate over the source bytes.\n\tvar (\n\t\ts   int // The iterator position.\n\t\tt   int // The last position with the same hash as s.\n\t\tlit int // The start position of any pending literal bytes.\n\t)\n\tfor s+3 < len(src) {\n\t\t// Update the hash table.\n\t\tb0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]\n\t\th := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24\n\t\tp := &table[(h*0x1e35a7bd)>>shift]\n\t\t// We need to to store values in [-1, inf) in table. To save\n\t\t// some initialization time, (re)use the table's zero value\n\t\t// and shift the values against this zero: add 1 on writes,\n\t\t// subtract 1 on reads.\n\t\tt, *p = *p-1, s+1\n\t\t// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.\n\t\tif t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {\n\t\t\ts++\n\t\t\tcontinue\n\t\t}\n\t\t// Otherwise, we have a match. 
First, emit any pending literal bytes.\n\t\tif lit != s {\n\t\t\td += emitLiteral(dst[d:], src[lit:s])\n\t\t}\n\t\t// Extend the match to be as long as possible.\n\t\ts0 := s\n\t\ts, t = s+4, t+4\n\t\tfor s < len(src) && src[s] == src[t] {\n\t\t\ts++\n\t\t\tt++\n\t\t}\n\t\t// Emit the copied bytes.\n\t\td += emitCopy(dst[d:], s-t, s-s0)\n\t\tlit = s\n\t}\n\n\t// Emit any final pending literal bytes and return.\n\tif lit != len(src) {\n\t\td += emitLiteral(dst[d:], src[lit:])\n\t}\n\treturn dst[:d], nil\n}\n\n// MaxEncodedLen returns the maximum length of a snappy block, given its\n// uncompressed length.\nfunc MaxEncodedLen(srcLen int) int {\n\t// Compressed data can be defined as:\n\t//    compressed := item* literal*\n\t//    item       := literal* copy\n\t//\n\t// The trailing literal sequence has a space blowup of at most 62/60\n\t// since a literal of length 60 needs one tag byte + one extra byte\n\t// for length information.\n\t//\n\t// Item blowup is trickier to measure. Suppose the \"copy\" op copies\n\t// 4 bytes of data. Because of a special check in the encoding code,\n\t// we produce a 4-byte copy only if the offset is < 65536. Therefore\n\t// the copy op takes 3 bytes to encode, and this type of item leads\n\t// to at most the 62/60 blowup for representing literals.\n\t//\n\t// Suppose the \"copy\" op copies 5 bytes of data. If the offset is big\n\t// enough, it will take 5 bytes to encode the copy op. Therefore the\n\t// worst case here is a one-byte literal followed by a five-byte copy.\n\t// That is, 6 bytes of input turn into 7 bytes of \"compressed\" data.\n\t//\n\t// This last factor dominates the blowup, so the final estimate is:\n\treturn 32 + srcLen + srcLen/6\n}\n"
  },
  {
    "path": "vendor/code.google.com/p/snappy-go/snappy/snappy.go",
    "content": "// Copyright 2011 The Snappy-Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package snappy implements the snappy block-based compression format.\n// It aims for very high speeds and reasonable compression.\n//\n// The C++ snappy implementation is at http://code.google.com/p/snappy/\npackage snappy\n\n/*\nEach encoded block begins with the varint-encoded length of the decoded data,\nfollowed by a sequence of chunks. Chunks begin and end on byte boundaries. The\nfirst byte of each chunk is broken into its 2 least and 6 most significant bits\ncalled l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.\nZero means a literal tag. All other values mean a copy tag.\n\nFor literal tags:\n  - If m < 60, the next 1 + m bytes are literal bytes.\n  - Otherwise, let n be the little-endian unsigned integer denoted by the next\n    m - 59 bytes. The next 1 + n bytes after that are literal bytes.\n\nFor copy tags, length bytes are copied from offset bytes ago, in the style of\nLempel-Ziv compression algorithms. In particular:\n  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).\n    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10\n    of the offset. The next byte is bits 0-7 of the offset.\n  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).\n    The length is 1 + m. The offset is the little-endian unsigned integer\n    denoted by the next 2 bytes.\n  - For l == 3, this tag is a legacy format that is no longer supported.\n*/\nconst (\n\ttagLiteral = 0x00\n\ttagCopy1   = 0x01\n\ttagCopy2   = 0x02\n\ttagCopy4   = 0x03\n)\n"
  },
  {
    "path": "vendor/code.google.com/p/snappy-go/snappy/snappy_test.go",
    "content": "// Copyright 2011 The Snappy-Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage snappy\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"net/http\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar download = flag.Bool(\"download\", false, \"If true, download any missing files before running benchmarks\")\n\nfunc roundtrip(b, ebuf, dbuf []byte) error {\n\te, err := Encode(ebuf, b)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"encoding error: %v\", err)\n\t}\n\td, err := Decode(dbuf, e)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"decoding error: %v\", err)\n\t}\n\tif !bytes.Equal(b, d) {\n\t\treturn fmt.Errorf(\"roundtrip mismatch:\\n\\twant %v\\n\\tgot  %v\", b, d)\n\t}\n\treturn nil\n}\n\nfunc TestEmpty(t *testing.T) {\n\tif err := roundtrip(nil, nil, nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestSmallCopy(t *testing.T) {\n\tfor _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {\n\t\tfor _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {\n\t\t\tfor i := 0; i < 32; i++ {\n\t\t\t\ts := \"aaaa\" + strings.Repeat(\"b\", i) + \"aaaabbbb\"\n\t\t\t\tif err := roundtrip([]byte(s), ebuf, dbuf); err != nil {\n\t\t\t\t\tt.Errorf(\"len(ebuf)=%d, len(dbuf)=%d, i=%d: %v\", len(ebuf), len(dbuf), i, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestSmallRand(t *testing.T) {\n\trand.Seed(27354294)\n\tfor n := 1; n < 20000; n += 23 {\n\t\tb := make([]byte, n)\n\t\tfor i, _ := range b {\n\t\t\tb[i] = uint8(rand.Uint32())\n\t\t}\n\t\tif err := roundtrip(b, nil, nil); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestSmallRegular(t *testing.T) {\n\tfor n := 1; n < 20000; n += 23 {\n\t\tb := make([]byte, n)\n\t\tfor i, _ := range b {\n\t\t\tb[i] = uint8(i%10 + 'a')\n\t\t}\n\t\tif err := roundtrip(b, nil, nil); err != nil 
{\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc benchDecode(b *testing.B, src []byte) {\n\tencoded, err := Encode(nil, src)\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\t// Bandwidth is in amount of uncompressed data.\n\tb.SetBytes(int64(len(src)))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tDecode(src, encoded)\n\t}\n}\n\nfunc benchEncode(b *testing.B, src []byte) {\n\t// Bandwidth is in amount of uncompressed data.\n\tb.SetBytes(int64(len(src)))\n\tdst := make([]byte, MaxEncodedLen(len(src)))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tEncode(dst, src)\n\t}\n}\n\nfunc readFile(b *testing.B, filename string) []byte {\n\tsrc, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tb.Fatalf(\"failed reading %s: %s\", filename, err)\n\t}\n\tif len(src) == 0 {\n\t\tb.Fatalf(\"%s has zero length\", filename)\n\t}\n\treturn src\n}\n\n// expand returns a slice of length n containing repeated copies of src.\nfunc expand(src []byte, n int) []byte {\n\tdst := make([]byte, n)\n\tfor x := dst; len(x) > 0; {\n\t\ti := copy(x, src)\n\t\tx = x[i:]\n\t}\n\treturn dst\n}\n\nfunc benchWords(b *testing.B, n int, decode bool) {\n\t// Note: the file is OS-language dependent so the resulting values are not\n\t// directly comparable for non-US-English OS installations.\n\tdata := expand(readFile(b, \"/usr/share/dict/words\"), n)\n\tif decode {\n\t\tbenchDecode(b, data)\n\t} else {\n\t\tbenchEncode(b, data)\n\t}\n}\n\nfunc BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) }\nfunc BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) }\nfunc BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) }\nfunc BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) }\nfunc BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) }\nfunc BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) }\nfunc BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }\nfunc BenchmarkWordsEncode1e6(b *testing.B) { 
benchWords(b, 1e6, false) }\n\n// testFiles' values are copied directly from\n// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc.\n// The label field is unused in snappy-go.\nvar testFiles = []struct {\n\tlabel    string\n\tfilename string\n}{\n\t{\"html\", \"html\"},\n\t{\"urls\", \"urls.10K\"},\n\t{\"jpg\", \"house.jpg\"},\n\t{\"pdf\", \"mapreduce-osdi-1.pdf\"},\n\t{\"html4\", \"html_x_4\"},\n\t{\"cp\", \"cp.html\"},\n\t{\"c\", \"fields.c\"},\n\t{\"lsp\", \"grammar.lsp\"},\n\t{\"xls\", \"kennedy.xls\"},\n\t{\"txt1\", \"alice29.txt\"},\n\t{\"txt2\", \"asyoulik.txt\"},\n\t{\"txt3\", \"lcet10.txt\"},\n\t{\"txt4\", \"plrabn12.txt\"},\n\t{\"bin\", \"ptt5\"},\n\t{\"sum\", \"sum\"},\n\t{\"man\", \"xargs.1\"},\n\t{\"pb\", \"geo.protodata\"},\n\t{\"gaviota\", \"kppkn.gtb\"},\n}\n\n// The test data files are present at this canonical URL.\nconst baseURL = \"https://snappy.googlecode.com/svn/trunk/testdata/\"\n\nfunc downloadTestdata(basename string) (errRet error) {\n\tfilename := filepath.Join(\"testdata\", basename)\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create %s: %s\", filename, err)\n\t}\n\tdefer f.Close()\n\tdefer func() {\n\t\tif errRet != nil {\n\t\t\tos.Remove(filename)\n\t\t}\n\t}()\n\tresp, err := http.Get(baseURL + basename)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to download %s: %s\", baseURL+basename, err)\n\t}\n\tdefer resp.Body.Close()\n\t_, err = io.Copy(f, resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to write %s: %s\", filename, err)\n\t}\n\treturn nil\n}\n\nfunc benchFile(b *testing.B, n int, decode bool) {\n\tfilename := filepath.Join(\"testdata\", testFiles[n].filename)\n\tif stat, err := os.Stat(filename); err != nil || stat.Size() == 0 {\n\t\tif !*download {\n\t\t\tb.Fatal(\"test data not found; skipping benchmark without the -download flag\")\n\t\t}\n\t\t// Download the official snappy C++ implementation reference test data\n\t\t// files for 
benchmarking.\n\t\tif err := os.Mkdir(\"testdata\", 0777); err != nil && !os.IsExist(err) {\n\t\t\tb.Fatalf(\"failed to create testdata: %s\", err)\n\t\t}\n\t\tfor _, tf := range testFiles {\n\t\t\tif err := downloadTestdata(tf.filename); err != nil {\n\t\t\t\tb.Fatalf(\"failed to download testdata: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\tdata := readFile(b, filename)\n\tif decode {\n\t\tbenchDecode(b, data)\n\t} else {\n\t\tbenchEncode(b, data)\n\t}\n}\n\n// Naming convention is kept similar to what snappy's C++ implementation uses.\nfunc Benchmark_UFlat0(b *testing.B)  { benchFile(b, 0, true) }\nfunc Benchmark_UFlat1(b *testing.B)  { benchFile(b, 1, true) }\nfunc Benchmark_UFlat2(b *testing.B)  { benchFile(b, 2, true) }\nfunc Benchmark_UFlat3(b *testing.B)  { benchFile(b, 3, true) }\nfunc Benchmark_UFlat4(b *testing.B)  { benchFile(b, 4, true) }\nfunc Benchmark_UFlat5(b *testing.B)  { benchFile(b, 5, true) }\nfunc Benchmark_UFlat6(b *testing.B)  { benchFile(b, 6, true) }\nfunc Benchmark_UFlat7(b *testing.B)  { benchFile(b, 7, true) }\nfunc Benchmark_UFlat8(b *testing.B)  { benchFile(b, 8, true) }\nfunc Benchmark_UFlat9(b *testing.B)  { benchFile(b, 9, true) }\nfunc Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }\nfunc Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }\nfunc Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) }\nfunc Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) }\nfunc Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) }\nfunc Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) }\nfunc Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) }\nfunc Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) }\nfunc Benchmark_ZFlat0(b *testing.B)  { benchFile(b, 0, false) }\nfunc Benchmark_ZFlat1(b *testing.B)  { benchFile(b, 1, false) }\nfunc Benchmark_ZFlat2(b *testing.B)  { benchFile(b, 2, false) }\nfunc Benchmark_ZFlat3(b *testing.B)  { benchFile(b, 3, false) }\nfunc Benchmark_ZFlat4(b *testing.B)  { 
benchFile(b, 4, false) }\nfunc Benchmark_ZFlat5(b *testing.B)  { benchFile(b, 5, false) }\nfunc Benchmark_ZFlat6(b *testing.B)  { benchFile(b, 6, false) }\nfunc Benchmark_ZFlat7(b *testing.B)  { benchFile(b, 7, false) }\nfunc Benchmark_ZFlat8(b *testing.B)  { benchFile(b, 8, false) }\nfunc Benchmark_ZFlat9(b *testing.B)  { benchFile(b, 9, false) }\nfunc Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }\nfunc Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }\nfunc Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) }\nfunc Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) }\nfunc Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) }\nfunc Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) }\nfunc Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) }\nfunc Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) }\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/.travis.yml",
    "content": "language: go\ngo:\n  - 1.4.2\nenv:\n  - NSQ_DOWNLOAD=nsq-0.2.30.linux-amd64.go1.3 GOARCH=amd64\n  - NSQ_DOWNLOAD=nsq-0.2.30.linux-amd64.go1.3 GOARCH=386\n  - NSQ_DOWNLOAD=nsq-0.2.31.linux-amd64.go1.3.1 GOARCH=amd64\n  - NSQ_DOWNLOAD=nsq-0.2.31.linux-amd64.go1.3.1 GOARCH=386\n  - NSQ_DOWNLOAD=nsq-0.3.0.linux-amd64.go1.3.3 GOARCH=amd64\n  - NSQ_DOWNLOAD=nsq-0.3.0.linux-amd64.go1.3.3 GOARCH=386\n  - NSQ_DOWNLOAD=nsq-0.3.1.linux-amd64.go1.4.1 GOARCH=amd64\n  - NSQ_DOWNLOAD=nsq-0.3.1.linux-amd64.go1.4.1 GOARCH=386\n  - NSQ_DOWNLOAD=nsq-0.3.2.linux-amd64.go1.4.1 GOARCH=amd64\n  - NSQ_DOWNLOAD=nsq-0.3.2.linux-amd64.go1.4.1 GOARCH=386\ninstall:\n  - go get github.com/bitly/go-simplejson\n  - go get github.com/mreiferson/go-snappystream\nscript:\n  - wget http://bitly-downloads.s3.amazonaws.com/nsq/$NSQ_DOWNLOAD.tar.gz\n  - tar zxvf $NSQ_DOWNLOAD.tar.gz\n  - export PATH=$NSQ_DOWNLOAD/bin:$PATH\n  - pushd $TRAVIS_BUILD_DIR\n  - ./test.sh\n  - popd\nnotifications:\n  email: false\n\nsudo: false\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/ChangeLog.md",
    "content": "## go-nsq Change Log\n\n### 1.0.4 - 2015-04-07\n\n**Upgrading from 1.0.3**: There are no backward incompatible changes.\n\n * #133 - fix `ErrNotConnected` race during `Producer` connection (thanks @jeddenlea)\n * #132 - fix `RDY` redistribution after backoff with no connections\n * #128 - fix backoff stall when using `RequeueWithoutBackoff`\n * #127 - fix handling of connection closing when resuming after backoff (thanks @jnewmano)\n * #126 - allow `BackoffStrategy` to be set via flag (thanks @twmb)\n * #125 - add pluggable consumer `BackoffStrategy`; add full-jitter strategy (thanks @hden)\n * #124 - add `DialTimeout` and `LocalAddr` config (thanks @yashkin)\n * #119 - add `Producer.Ping()` method (thanks @zulily)\n * #122 - refactor log level string handling\n * #120 - fix `Message` data races on `responded`\n * #114 - fix lookupd jitter having no effect (thanks @judwhite)\n\n### 1.0.3 - 2015-02-07\n\n**Upgrading from 1.0.2**: There are no backward incompatible changes.\n\n * #104 - fix reconnect address bug (thanks @ryanslade)\n * #106 - fix backoff reconnect deadlock (thanks @ryanslade)\n * #107 - fix out-of-bounds error when removing nsqlookupd addresses (thanks @andreas)\n * #108 - fix potential logger race conditions (thanks @judwhite)\n * #111 - fix resolved address error in reconnect loop (thanks @twmb)\n\n### 1.0.2 - 2015-01-21\n\n**Upgrading from 1.0.1**: There are no backward incompatible changes.\n\n * #102 - TLS min/max config defaults (thanks @twmb)\n * #99 - fix `Consumer.Stop()` race and `Producer.Stop()` deadlock (thanks @tylertreat)\n * #92 - expose `Message.NSQDAddress`\n * #95 - cleanup panic during `Consumer.Stop()` if handlers are deadlocked\n * #98 - add `tls-min-version` option (thanks @twmb)\n * #93 - expose a way to get `Consumer` runtime stats (thanks @dcarney)\n * #94 - allow `#ephemeral` topic names (thanks @jamesgroat)\n\n### 1.0.1 - 2014-11-09\n\n**Upgrading from 1.0.0**: There are no backward incompatible changes 
functionally, however this\nrelease no longer compiles with Go `1.0.x`.\n\n * #89 - don't spam connection teardown cleanup messages\n * #91 - add consumer `DisconnectFrom*`\n * #87 - allow `heartbeat_interval` and `output_buffer_timeout` to be disabled\n * #86 - pluggable `nsqlookupd` behaviors\n * #83 - send `RDY` before `FIN`/`REQ` (forwards compatibility with bitly/nsq#404)\n * #82 - fix panic when conn isn't assigned\n * #75/#76 - minor config related bug fixes\n * #75/#77/#78 - add `tls-cert` and `tls-key` config options\n\n### 1.0.0 - 2014-08-11\n\n**Upgrading from 0.3.7**: The public API was significantly refactored and is not backwards\ncompatible, please read [UPGRADING](UPGRADING.md).\n\n * #58 - support `IDENTIFY` `msg_timeout`\n * #54 - per-connection TLS config and set `ServerName`\n * #49 - add common connect helpers\n * #43/#63 - more flexible `nsqlookupd` URL specification\n * #35 - `AUTH` support\n * #41/#62 - use package private RNG\n * #36 - support 64 character topic/channel names\n * #30/#38/#39/#42/#45/#46/#48/#51/#52/#65/#70 - refactor public API (see [UPGRADING](UPGRADING.md))\n\n### 0.3.7 - 2014-05-25\n\n**Upgrading from 0.3.6**: There are no backward incompatible changes. **THIS IS THE LAST STABLE\nRELEASE PROVIDING THIS API**. 
Future releases will be based on the api in #30 and **will not be\nbackwards compatible!**\n\nThis is a bug fix release relating to the refactoring done in `0.3.6`.\n\n * #32 - fix potential panic for race condition when # conns == 0\n * #33/#34 - more granular connection locking\n\n### 0.3.6 - 2014-04-29\n\n**Upgrading from 0.3.5**: There are no backward incompatible changes.\n\nThis release includes a significant internal refactoring, designed\nto better encapsulate responsibility, see #19.\n\nSpecifically:\n\n * make `Conn` public\n * move transport responsibilities into `Conn` from `Reader`/`Writer`\n * supply callbacks for hooking into `Conn` events\n\nAs part of the refactoring, a few additional clean exit related \nissues were resolved:\n\n * wait group now includes all exit related goroutines\n * ensure that readLoop exits before exiting cleanup\n * always check messagesInFlight at readLoop exit\n * close underlying connection last\n\n### 0.3.5 - 2014-04-05\n\n**Upgrading from 0.3.4**: There are no backward incompatible changes.\n\nThis release includes a few new features such as support for channel\nsampling and sending along a user agent string (which is now displayed\nin `nsqadmin`).\n\nAlso, a critical bug fix for potential deadlocks (thanks @kjk\nfor reporting and help testing).\n\nNew Features/Improvements:\n\n * #27 - reader logs disambiguate topic/channel\n * #22 - channel sampling\n * #23 - user agent\n\nBug Fixes:\n\n * #24 - fix racey reader IDENTIFY buffering\n * #29 - fix recursive RLock deadlocks\n\n### 0.3.4 - 2013-11-19\n\n**Upgrading from 0.3.3**: There are no backward incompatible changes.\n\nThis is a bug fix release, notably potential deadlocks in `Message.Requeue()` and `Message.Touch()`\nas well as a potential busy loop cleaning up closed connections with in-flight messages.\n\nNew Features/Improvements:\n\n * #14 - add `Reader.Configure()`\n * #18 - return an exported error when an `nsqlookupd` address is already configured\n\nBug 
Fixes:\n\n * #15 - dont let `handleError()` loop if already connected\n * #17 - resolve potential deadlocks on `Message` responders\n * #16 - eliminate busy loop when draining `finishedMessages`\n\n### 0.3.3 - 2013-10-21\n\n**Upgrading from 0.3.2**: This release requires NSQ binary version `0.2.23+` for compression\nsupport.\n\nThis release contains significant `Reader` refactoring of the RDY handling code paths. The\nmotivation is documented in #1 however the commits in #8 identify individual changes. Additionally,\nwe eliminated deadlocks during connection cleanup in `Writer`.\n\nAs a result, both user-facing APIs should now be considerably more robust and stable. Additionally,\n`Reader` should behave better when backing off.\n\nNew Features/Improvements:\n\n * #9 - ability to ignore publish responses in `Writer`\n * #12 - `Requeue()` method on `Message`\n * #6 - `Touch()` method on `Message`\n * #4 - snappy/deflate feature negotiation\n\nBug Fixes:\n\n * #8 - `Reader` RDY handling refactoring (race conditions, deadlocks, consolidation)\n * #13 - fix `Writer` deadlocks\n * #10 - stop accessing simplejson internals\n * #5 - fix `max-in-flight` race condition\n\n### 0.3.2 - 2013-08-26\n\n**Upgrading from 0.3.1**: This release requires NSQ binary version `0.2.22+` for TLS support.\n\nNew Features/Improvements:\n\n * #227 - TLS feature negotiation\n * #164/#202/#255 - add `Writer`\n * #186 - `MaxBackoffDuration` of `0` disables backoff\n * #175 - support for `nsqd` config option `--max-rdy-count`\n * #169 - auto-reconnect to hard-coded `nsqd`\n\nBug Fixes:\n\n * #254/#256/#257 - new connection RDY starvation\n * #250 - `nsqlookupd` polling improvements\n * #243 - limit `IsStarved()` to connections w/ in-flight messages\n * #169 - use last RDY count for `IsStarved()`; redistribute RDY state\n * #204 - fix early termination blocking\n * #177 - support `broadcast_address`\n * #161 - connection pool goroutine safety\n\n### 0.3.1 - 2013-02-07\n\n**Upgrading from 0.3.0**: 
This release requires NSQ binary version `0.2.17+` for `TOUCH` support.\n\n * #119 - add TOUCH command\n * #133 - improved handling of errors/magic\n * #127 - send IDENTIFY (missed in #90)\n * #16 - add backoff to Reader\n\n### 0.3.0 - 2013-01-07\n\n**Upgrading from 0.2.4**: There are no backward incompatible changes to applications\nwritten against the public `nsq.Reader` API.\n\nHowever, there *are* a few backward incompatible changes to the API for applications that \ndirectly use other public methods, or properties of a few NSQ data types:\n\n`nsq.Message` IDs are now a type `nsq.MessageID` (a `[16]byte` array).  The signatures of\n`nsq.Finish()` and `nsq.Requeue()` reflect this change.\n\n`nsq.SendCommand()` and `nsq.Frame()` were removed in favor of `nsq.SendFramedResponse()`.\n\n`nsq.Subscribe()` no longer accepts `shortId` and `longId`.  If upgrading your consumers\nbefore upgrading your `nsqd` binaries to `0.2.16-rc.1` they will not be able to send the \noptional custom identifiers.\n    \n * #90 performance optimizations\n * #81 reader performance improvements / MPUB support\n\n### 0.2.4 - 2012-10-15\n\n * #69 added IsStarved() to reader API\n\n### 0.2.3 - 2012-10-11\n\n * #64 timeouts on reader queries to lookupd\n * #54 fix crash issue with reader cleaning up from unexpectedly closed nsqd connections\n\n### 0.2.2 - 2012-10-09\n\n * Initial public release\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/LICENSE",
    "content": "Permission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/README.md",
    "content": "## go-nsq\n\nThe official Go package for [NSQ][nsq].\n\n[![Build Status](https://secure.travis-ci.org/bitly/go-nsq.png?branch=master)][travis] [![GoDoc](https://godoc.org/github.com/bitly/go-nsq?status.svg)](https://godoc.org/github.com/bitly/go-nsq)\n\nThe latest stable release is **[1.0.4][latest_tag]**.\n\nNOTE: The public API has been refactored as of `v1.0.0` and is not backwards compatible with\nprevious releases. **[0.3.7][legacy]** is the last stable release compatible with the legacy API.\nPlease read the [UPGRADING](UPGRADING.md) guide.\n\n### Docs\n\nSee [godoc][nsq_gopkgdoc].\n\nSee the [main repo apps][apps] directory for examples of clients built using this package.\n\n[nsq]: https://github.com/bitly/nsq\n[nsq_gopkgdoc]: http://godoc.org/github.com/bitly/go-nsq\n[protocol]: http://bitly.github.io/nsq/clients/tcp_protocol_spec.html\n[apps]: https://github.com/bitly/nsq/tree/master/apps\n[consumer]: http://godoc.org/github.com/bitly/go-nsq#Consumer\n[producer]: http://godoc.org/github.com/bitly/go-nsq#Producer\n[pr30]: https://github.com/bitly/go-nsq/pull/30\n[legacy]: https://github.com/bitly/go-nsq/releases/tag/v0.3.7\n[travis]: http://travis-ci.org/bitly/go-nsq\n[latest_tag]: https://github.com/bitly/go-nsq/releases/tag/v1.0.4\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/UPGRADING.md",
    "content": "This outlines the backwards incompatible changes that were made to the public API after the\n`v0.3.7` stable release, and and how to migrate existing legacy codebases.\n\n#### Background\n\nThe original `go-nsq` codebase is some of our earliest Go code, and one of our first attempts at a\npublic Go library.\n\nWe've learned a lot over the last 2 years and we wanted `go-nsq` to reflect the experiences we've\nhad working with the library as well as the general Go conventions and best practices we picked up\nalong the way.\n\nThe diff can be seen via: https://github.com/bitly/go-nsq/compare/v0.3.7...HEAD\n\nThe bulk of the refactoring came via: https://github.com/bitly/go-nsq/pull/30\n\n#### Naming\n\nPreviously, the high-level types we exposed were named `nsq.Reader` and `nsq.Writer`. These\nreflected internal naming conventions we had used at bitly for some time but conflated semantics\nwith what a typical Go developer would expect (they obviously did not implement `io.Reader` and\n`io.Writer`).\n\nWe renamed these types to `nsq.Consumer` and `nsq.Producer`, which more effectively communicate\ntheir purpose and is consistent with the NSQ documentation.\n\n#### Configuration\n\nIn the previous API there were inconsistent and confusing ways to configure your clients.\n\nNow, configuration is performed *before* creating an `nsq.Consumer` or `nsq.Producer` by creating\nan `nsq.Config` struct. The only valid way to do this is via `nsq.NewConfig` (i.e. using a struct\nliteral will panic due to invalid internal state).\n\nThe `nsq.Config` struct has exported variables that can be set directly in a type-safe manner. 
You\ncan also call `cfg.Validate()` to check that the values are correct and within range.\n\n`nsq.Config` also exposes a convenient helper method `Set(k string, v interface{})` that can set\noptions by *coercing* the supplied `interface{}` value.\n\nThis is incredibly convenient if you're reading options from a config file or in a serialized\nformat that does not exactly match the native types.\n\nIt is both flexible and forgiving.\n\n#### Improving the nsq.Handler interface\n\n`go-nsq` attempts to make writing the common use case consumer incredibly easy.\n\nYou specify a type that implements the `nsq.Handler` interface, the interface method is called per\nmessage, and the return value of said method indicates to the library what the response to `nsqd`\nshould be (`FIN` or `REQ`), all the while managing flow control and backoff.\n\nHowever, more advanced use cases require the ability to respond to a message *later*\n(\"asynchronously\", if you will). Our original API provided a *second* message handler interface\ncalled `nsq.AsyncHandler`.\n\nUnfortunately, it was never obvious from the name alone (or even the documentation) how to properly\nuse this form. The API was needlessly complex, involving the garbage creation of wrapping structs\nto track state and respond to messages.\n\nWe originally had the same problem in `pynsq`, our Python client library, and we were able to\nresolve the tension and expose an API that was robust and supported all use cases.\n\nThe new `go-nsq` message handler interface exposes only `nsq.Handler`, and its `HandleMessage`\nmethod remains identical (specifically, `nsq.AsyncHandler` has been removed).\n\nAdditionally, the API to configure handlers has been improved to provide better first-class support\nfor common operations. 
We've added `AddConcurrentHandlers` (for quickly spawning multiple handler\ngoroutines).\n\nFor the most common use case, where you want `go-nsq` to respond to messages on your behalf, there\nare no changes required! In fact, we've made it even easier to implement the `nsq.Handler`\ninterface for simple functions by providing the `nsq.HandlerFunc` type (in the spirit of the Go\nstandard library's `http.HandlerFunc`):\n\n```go\nr, err := nsq.NewConsumer(\"test_topic\", \"test_channel\", nsq.NewConfig())\nif err != nil {\n    log.Fatalf(err.Error())\n}\n\nr.AddHandler(nsq.HandlerFunc(func(m *nsq.Message) error {\n    return doSomeWork(m)\n})\n\nerr := r.ConnectToNSQD(nsqdAddr)\nif err != nil {\n    log.Fatalf(err.Error())\n}\n\n<-r.StopChan\n```\n\nIn the new API, we've made the `nsq.Message` struct more robust, giving it the ability to proxy\nresponses. If you want to usurp control of the message from `go-nsq`, you simply call\n`msg.DisableAutoResponse()`.\n\nThis is effectively the same as if you had used `nsq.AsyncHandler`, only you don't need to manage\n`nsq.FinishedMessage` structs or implement a separate interface. Instead you just keep/pass\nreferences to the `nsq.Message` itself, and when you're ready to respond you call `msg.Finish()`,\n`msg.Requeue(<duration>)` or `msg.Touch(<duration>)`.  
Additionally, this means you can make this\ndecision on a *per-message* basis rather than for the lifetime of the handler.\n\nHere is an example:\n\n```go\ntype myHandler struct {}\n\nfunc (h *myHandler) HandleMessage(m *nsq.Message) error {\n    m.DisableAutoResponse()\n    workerChan <- m\n    return nil\n}\n\ngo func() {\n    for m := range workerChan {\n        err := doSomeWork(m)\n        if err != nil {\n            m.Requeue(-1)\n            continue\n        }\n        m.Finish()\n    }\n}()\n\ncfg := nsq.NewConfig()\ncfg.MaxInFlight = 1000\nr, err := nsq.NewConsumer(\"test_topic\", \"test_channel\", cfg)\nif err != nil {\n    log.Fatalf(err.Error())\n}\nr.AddConcurrentHandlers(&myHandler{}, 20)\n\nerr := r.ConnectToNSQD(nsqdAddr)\nif err != nil {\n    log.Fatalf(err.Error())\n}\n\n<-r.StopChan\n```\n\n#### Requeue without backoff\n\nAs a side effect of the message handler restructuring above, it is now trivial to respond to a\nmessage without triggering a backoff state in `nsq.Consumer` (which was not possible in the\nprevious API).\n\nThe `nsq.Message` type now has a `msg.RequeueWithoutBackoff()` method for this purpose.\n\n#### Producer Error Handling\n\nPreviously, `Writer` (now `Producer`) returned a triplicate of `frameType`, `responseBody`, and\n`error` from calls to `*Publish`.\n\nThis required the caller to check both `error` and `frameType` to confirm success. 
`Producer`\npublish methods now return only `error`.\n\n#### Logging\n\nOne of the challenges library implementors face is how to provide feedback via logging, while\nexposing an interface that follows the standard library and still provides a means to control and\nconfigure the output.\n\nIn the new API, we've provided a method on `Consumer` and `Producer` called `SetLogger` that takes\nan interface compatible with the Go standard library `log.Logger` (which can be instantiated via\n`log.NewLogger`) and a traditional log level integer `nsq.LogLevel{Debug,Info,Warning,Error}`:\n\n    Output(maxdepth int, s string) error\n\nThis gives the user the flexibility to control the format, destination, and verbosity while still\nconforming to standard library logging conventions.\n\n#### Misc.\n\nUn-exported `NewDeadlineTransport` and `ApiRequest`, which never should have been exported in the\nfirst place.\n\n`nsq.Message` serialization switched away from `binary.{Read,Write}` for performance and\n`nsq.Message` now implements the `io.WriterTo` interface.\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/api_request.go",
    "content": "package nsq\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"time\"\n)\n\ntype deadlinedConn struct {\n\tTimeout time.Duration\n\tnet.Conn\n}\n\nfunc (c *deadlinedConn) Read(b []byte) (n int, err error) {\n\tc.Conn.SetReadDeadline(time.Now().Add(c.Timeout))\n\treturn c.Conn.Read(b)\n}\n\nfunc (c *deadlinedConn) Write(b []byte) (n int, err error) {\n\tc.Conn.SetWriteDeadline(time.Now().Add(c.Timeout))\n\treturn c.Conn.Write(b)\n}\n\nfunc newDeadlineTransport(timeout time.Duration) *http.Transport {\n\ttransport := &http.Transport{\n\t\tDial: func(netw, addr string) (net.Conn, error) {\n\t\t\tc, err := net.DialTimeout(netw, addr, timeout)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn &deadlinedConn{timeout, c}, nil\n\t\t},\n\t}\n\treturn transport\n}\n\ntype wrappedResp struct {\n\tStatus     string      `json:\"status_txt\"`\n\tStatusCode int         `json:\"status_code\"`\n\tData       interface{} `json:\"data\"`\n}\n\n// stores the result in the value pointed to by ret(must be a pointer)\nfunc apiRequestNegotiateV1(method string, endpoint string, body io.Reader, ret interface{}) error {\n\thttpclient := &http.Client{Transport: newDeadlineTransport(2 * time.Second)}\n\treq, err := http.NewRequest(method, endpoint, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq.Header.Add(\"Accept\", \"application/vnd.nsq; version=1.0\")\n\n\tresp, err := httpclient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"got response %s %q\", resp.Status, respBody)\n\t}\n\n\tif len(respBody) == 0 {\n\t\trespBody = []byte(\"{}\")\n\t}\n\n\tif resp.Header.Get(\"X-NSQ-Content-Type\") == \"nsq; version=1.0\" {\n\t\treturn json.Unmarshal(respBody, ret)\n\t}\n\n\twResp := &wrappedResp{\n\t\tData: ret,\n\t}\n\n\tif err = 
json.Unmarshal(respBody, wResp); err != nil {\n\t\treturn err\n\t}\n\n\t// wResp.StatusCode here is equal to resp.StatusCode, so ignore it\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/command.go",
    "content": "package nsq\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"time\"\n)\n\nvar byteSpace = []byte(\" \")\nvar byteNewLine = []byte(\"\\n\")\n\n// Command represents a command from a client to an NSQ daemon\ntype Command struct {\n\tName   []byte\n\tParams [][]byte\n\tBody   []byte\n}\n\n// String returns the name and parameters of the Command\nfunc (c *Command) String() string {\n\tif len(c.Params) > 0 {\n\t\treturn fmt.Sprintf(\"%s %s\", c.Name, string(bytes.Join(c.Params, byteSpace)))\n\t}\n\treturn string(c.Name)\n}\n\n// WriteTo implements the WriterTo interface and\n// serializes the Command to the supplied Writer.\n//\n// It is suggested that the target Writer is buffered\n// to avoid performing many system calls.\nfunc (c *Command) WriteTo(w io.Writer) (int64, error) {\n\tvar total int64\n\tvar buf [4]byte\n\n\tn, err := w.Write(c.Name)\n\ttotal += int64(n)\n\tif err != nil {\n\t\treturn total, err\n\t}\n\n\tfor _, param := range c.Params {\n\t\tn, err := w.Write(byteSpace)\n\t\ttotal += int64(n)\n\t\tif err != nil {\n\t\t\treturn total, err\n\t\t}\n\t\tn, err = w.Write(param)\n\t\ttotal += int64(n)\n\t\tif err != nil {\n\t\t\treturn total, err\n\t\t}\n\t}\n\n\tn, err = w.Write(byteNewLine)\n\ttotal += int64(n)\n\tif err != nil {\n\t\treturn total, err\n\t}\n\n\tif c.Body != nil {\n\t\tbufs := buf[:]\n\t\tbinary.BigEndian.PutUint32(bufs, uint32(len(c.Body)))\n\t\tn, err := w.Write(bufs)\n\t\ttotal += int64(n)\n\t\tif err != nil {\n\t\t\treturn total, err\n\t\t}\n\t\tn, err = w.Write(c.Body)\n\t\ttotal += int64(n)\n\t\tif err != nil {\n\t\t\treturn total, err\n\t\t}\n\t}\n\n\treturn total, nil\n}\n\n// Identify creates a new Command to provide information about the client.  
After connecting,\n// it is generally the first message sent.\n//\n// The supplied map is marshaled into JSON to provide some flexibility\n// for this command to evolve over time.\n//\n// See http://nsq.io/clients/tcp_protocol_spec.html#identify for information\n// on the supported options\nfunc Identify(js map[string]interface{}) (*Command, error) {\n\tbody, err := json.Marshal(js)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Command{[]byte(\"IDENTIFY\"), nil, body}, nil\n}\n\n// Auth sends credentials for authentication\n//\n// After `Identify`, this is usually the first message sent, if auth is used.\nfunc Auth(secret string) (*Command, error) {\n\treturn &Command{[]byte(\"AUTH\"), nil, []byte(secret)}, nil\n}\n\n// Register creates a new Command to add a topic/channel for the connected nsqd\nfunc Register(topic string, channel string) *Command {\n\tparams := [][]byte{[]byte(topic)}\n\tif len(channel) > 0 {\n\t\tparams = append(params, []byte(channel))\n\t}\n\treturn &Command{[]byte(\"REGISTER\"), params, nil}\n}\n\n// UnRegister creates a new Command to remove a topic/channel for the connected nsqd\nfunc UnRegister(topic string, channel string) *Command {\n\tparams := [][]byte{[]byte(topic)}\n\tif len(channel) > 0 {\n\t\tparams = append(params, []byte(channel))\n\t}\n\treturn &Command{[]byte(\"UNREGISTER\"), params, nil}\n}\n\n// Ping creates a new Command to keep-alive the state of all the\n// announced topic/channels for a given client\nfunc Ping() *Command {\n\treturn &Command{[]byte(\"PING\"), nil, nil}\n}\n\n// Publish creates a new Command to write a message to a given topic\nfunc Publish(topic string, body []byte) *Command {\n\tvar params = [][]byte{[]byte(topic)}\n\treturn &Command{[]byte(\"PUB\"), params, body}\n}\n\n// DeferredPublish creates a new Command to write a message to a given topic\n// where the message will queue at the channel level until the timeout expires\nfunc DeferredPublish(topic string, delay time.Duration, body []byte) 
*Command {\n\tvar params = [][]byte{[]byte(topic), []byte(strconv.Itoa(int(delay / time.Millisecond)))}\n\treturn &Command{[]byte(\"DPUB\"), params, body}\n}\n\n// MultiPublish creates a new Command to write more than one message to a given topic\n// (useful for high-throughput situations to avoid roundtrips and saturate the pipe)\nfunc MultiPublish(topic string, bodies [][]byte) (*Command, error) {\n\tvar params = [][]byte{[]byte(topic)}\n\n\tnum := uint32(len(bodies))\n\tbodySize := 4\n\tfor _, b := range bodies {\n\t\tbodySize += len(b) + 4\n\t}\n\tbody := make([]byte, 0, bodySize)\n\tbuf := bytes.NewBuffer(body)\n\n\terr := binary.Write(buf, binary.BigEndian, &num)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, b := range bodies {\n\t\terr = binary.Write(buf, binary.BigEndian, int32(len(b)))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t_, err = buf.Write(b)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &Command{[]byte(\"MPUB\"), params, buf.Bytes()}, nil\n}\n\n// Subscribe creates a new Command to subscribe to the given topic/channel\nfunc Subscribe(topic string, channel string) *Command {\n\tvar params = [][]byte{[]byte(topic), []byte(channel)}\n\treturn &Command{[]byte(\"SUB\"), params, nil}\n}\n\n// Ready creates a new Command to specify\n// the number of messages a client is willing to receive\nfunc Ready(count int) *Command {\n\tvar params = [][]byte{[]byte(strconv.Itoa(count))}\n\treturn &Command{[]byte(\"RDY\"), params, nil}\n}\n\n// Finish creates a new Command to indiciate that\n// a given message (by id) has been processed successfully\nfunc Finish(id MessageID) *Command {\n\tvar params = [][]byte{id[:]}\n\treturn &Command{[]byte(\"FIN\"), params, nil}\n}\n\n// Requeue creates a new Command to indicate that\n// a given message (by id) should be requeued after the given delay\n// NOTE: a delay of 0 indicates immediate requeue\nfunc Requeue(id MessageID, delay time.Duration) *Command {\n\tvar params = 
[][]byte{id[:], []byte(strconv.Itoa(int(delay / time.Millisecond)))}\n\treturn &Command{[]byte(\"REQ\"), params, nil}\n}\n\n// Touch creates a new Command to reset the timeout for\n// a given message (by id)\nfunc Touch(id MessageID) *Command {\n\tvar params = [][]byte{id[:]}\n\treturn &Command{[]byte(\"TOUCH\"), params, nil}\n}\n\n// StartClose creates a new Command to indicate that the\n// client would like to start a close cycle.  nsqd will no longer\n// send messages to a client in this state and the client is expected\n// finish pending messages and close the connection\nfunc StartClose() *Command {\n\treturn &Command{[]byte(\"CLS\"), nil, nil}\n}\n\n// Nop creates a new Command that has no effect server side.\n// Commonly used to respond to heartbeats\nfunc Nop() *Command {\n\treturn &Command{[]byte(\"NOP\"), nil, nil}\n}\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/command_test.go",
    "content": "package nsq\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n)\n\nfunc BenchmarkCommand(b *testing.B) {\n\tb.StopTimer()\n\tdata := make([]byte, 2048)\n\tcmd := Publish(\"test\", data)\n\tvar buf bytes.Buffer\n\tb.StartTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tcmd.WriteTo(&buf)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/config.go",
    "content": "package nsq\n\nimport (\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"math\"\n\t\"math/rand\"\n\t\"net\"\n\t\"os\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\t\"unsafe\"\n)\n\n// Define handlers for setting config defaults, and setting config values from command line arguments or config files\ntype configHandler interface {\n\tHandlesOption(c *Config, option string) bool\n\tSet(c *Config, option string, value interface{}) error\n\tValidate(c *Config) error\n}\n\ntype defaultsHandler interface {\n\tSetDefaults(c *Config) error\n}\n\n// BackoffStrategy defines a strategy for calculating the duration of time\n// a consumer should backoff for a given attempt\ntype BackoffStrategy interface {\n\tCalculate(attempt int) time.Duration\n}\n\n// ExponentialStrategy implements an exponential backoff strategy (default)\ntype ExponentialStrategy struct {\n\tcfg *Config\n}\n\n// Calculate returns a duration of time: 2 ^ attempt\nfunc (s *ExponentialStrategy) Calculate(attempt int) time.Duration {\n\tbackoffDuration := s.cfg.BackoffMultiplier *\n\t\ttime.Duration(math.Pow(2, float64(attempt)))\n\treturn backoffDuration\n}\n\nfunc (s *ExponentialStrategy) setConfig(cfg *Config) {\n\ts.cfg = cfg\n}\n\n// FullJitterStrategy implements http://www.awsarchitectureblog.com/2015/03/backoff.html\ntype FullJitterStrategy struct {\n\tcfg *Config\n\n\trngOnce sync.Once\n\trng     *rand.Rand\n}\n\n// Calculate returns a random duration of time [0, 2 ^ attempt]\nfunc (s *FullJitterStrategy) Calculate(attempt int) time.Duration {\n\t// lazily initialize the RNG\n\ts.rngOnce.Do(func() {\n\t\tif s.rng != nil {\n\t\t\treturn\n\t\t}\n\t\ts.rng = rand.New(rand.NewSource(time.Now().UnixNano()))\n\t})\n\n\tbackoffDuration := s.cfg.BackoffMultiplier *\n\t\ttime.Duration(math.Pow(2, float64(attempt)))\n\treturn time.Duration(s.rng.Intn(int(backoffDuration)))\n}\n\nfunc (s *FullJitterStrategy) setConfig(cfg 
*Config) {\n\ts.cfg = cfg\n}\n\n// Config is a struct of NSQ options\n//\n// The only valid way to create a Config is via NewConfig, using a struct literal will panic.\n// After Config is passed into a high-level type (like Consumer, Producer, etc.) the values are no\n// longer mutable (they are copied).\n//\n// Use Set(option string, value interface{}) as an alternate way to set parameters\ntype Config struct {\n\tinitialized bool\n\n\t// used to Initialize, Validate\n\tconfigHandlers []configHandler\n\n\tDialTimeout time.Duration `opt:\"dial_timeout\" default:\"1s\"`\n\n\t// Deadlines for network reads and writes\n\tReadTimeout  time.Duration `opt:\"read_timeout\" min:\"100ms\" max:\"5m\" default:\"60s\"`\n\tWriteTimeout time.Duration `opt:\"write_timeout\" min:\"100ms\" max:\"5m\" default:\"1s\"`\n\n\t// LocalAddr is the local address to use when dialing an nsqd.\n\t// If empty, a local address is automatically chosen.\n\tLocalAddr net.Addr `opt:\"local_addr\"`\n\n\t// Duration between polling lookupd for new producers, and fractional jitter to add to\n\t// the lookupd pool loop. this helps evenly distribute requests even if multiple consumers\n\t// restart at the same time\n\t//\n\t// NOTE: when not using nsqlookupd, LookupdPollInterval represents the duration of time between\n\t// reconnection attempts\n\tLookupdPollInterval time.Duration `opt:\"lookupd_poll_interval\" min:\"10ms\" max:\"5m\" default:\"60s\"`\n\tLookupdPollJitter   float64       `opt:\"lookupd_poll_jitter\" min:\"0\" max:\"1\" default:\"0.3\"`\n\n\t// Maximum duration when REQueueing (for doubling of deferred requeue)\n\tMaxRequeueDelay     time.Duration `opt:\"max_requeue_delay\" min:\"0\" max:\"60m\" default:\"15m\"`\n\tDefaultRequeueDelay time.Duration `opt:\"default_requeue_delay\" min:\"0\" max:\"60m\" default:\"90s\"`\n\n\t// Backoff strategy, defaults to exponential backoff. 
Overwrite this to define alternative backoff algrithms.\n\tBackoffStrategy BackoffStrategy `opt:\"backoff_strategy\" default:\"exponential\"`\n\t// Maximum amount of time to backoff when processing fails 0 == no backoff\n\tMaxBackoffDuration time.Duration `opt:\"max_backoff_duration\" min:\"0\" max:\"60m\" default:\"2m\"`\n\t// Unit of time for calculating consumer backoff\n\tBackoffMultiplier time.Duration `opt:\"backoff_multiplier\" min:\"0\" max:\"60m\" default:\"1s\"`\n\n\t// Maximum number of times this consumer will attempt to process a message before giving up\n\tMaxAttempts uint16 `opt:\"max_attempts\" min:\"0\" max:\"65535\" default:\"5\"`\n\n\t// Duration to wait for a message from a producer when in a state where RDY\n\t// counts are re-distributed (ie. max_in_flight < num_producers)\n\tLowRdyIdleTimeout time.Duration `opt:\"low_rdy_idle_timeout\" min:\"1s\" max:\"5m\" default:\"10s\"`\n\n\t// Duration between redistributing max-in-flight to connections\n\tRDYRedistributeInterval time.Duration `opt:\"rdy_redistribute_interval\" min:\"1ms\" max:\"5s\" default:\"5s\"`\n\n\t// Identifiers sent to nsqd representing this client\n\t// UserAgent is in the spirit of HTTP (default: \"<client_library_name>/<version>\")\n\tClientID  string `opt:\"client_id\"` // (defaults: short hostname)\n\tHostname  string `opt:\"hostname\"`\n\tUserAgent string `opt:\"user_agent\"`\n\n\t// Duration of time between heartbeats. 
This must be less than ReadTimeout\n\tHeartbeatInterval time.Duration `opt:\"heartbeat_interval\" default:\"30s\"`\n\t// Integer percentage to sample the channel (requires nsqd 0.2.25+)\n\tSampleRate int32 `opt:\"sample_rate\" min:\"0\" max:\"99\"`\n\n\t// To set TLS config, use the following options:\n\t//\n\t// tls_v1 - Bool enable TLS negotiation\n\t// tls_root_ca_file - String path to file containing root CA\n\t// tls_insecure_skip_verify - Bool indicates whether this client should verify server certificates\n\t// tls_cert - String path to file containing public key for certificate\n\t// tls_key - String path to file containing private key for certificate\n\t// tls_min_version - String indicating the minimum version of tls acceptable ('ssl3.0', 'tls1.0', 'tls1.1', 'tls1.2')\n\t//\n\tTlsV1     bool        `opt:\"tls_v1\"`\n\tTlsConfig *tls.Config `opt:\"tls_config\"`\n\n\t// Compression Settings\n\tDeflate      bool `opt:\"deflate\"`\n\tDeflateLevel int  `opt:\"deflate_level\" min:\"1\" max:\"9\" default:\"6\"`\n\tSnappy       bool `opt:\"snappy\"`\n\n\t// Size of the buffer (in bytes) used by nsqd for buffering writes to this connection\n\tOutputBufferSize int64 `opt:\"output_buffer_size\" default:\"16384\"`\n\t// Timeout used by nsqd before flushing buffered writes (set to 0 to disable).\n\t//\n\t// WARNING: configuring clients with an extremely low\n\t// (< 25ms) output_buffer_timeout has a significant effect\n\t// on nsqd CPU usage (particularly with > 50 clients connected).\n\tOutputBufferTimeout time.Duration `opt:\"output_buffer_timeout\" default:\"250ms\"`\n\n\t// Maximum number of messages to allow in flight (concurrency knob)\n\tMaxInFlight int `opt:\"max_in_flight\" min:\"0\" default:\"1\"`\n\n\t// The server-side message timeout for messages delivered to this client\n\tMsgTimeout time.Duration `opt:\"msg_timeout\" min:\"0\"`\n\n\t// secret for nsqd authentication (requires nsqd 0.2.29+)\n\tAuthSecret string `opt:\"auth_secret\"`\n}\n\n// NewConfig 
returns a new default nsq configuration.\n//\n// This must be used to initialize Config structs. Values can be set directly, or through Config.Set()\nfunc NewConfig() *Config {\n\tc := &Config{\n\t\tconfigHandlers: []configHandler{&structTagsConfig{}, &tlsConfig{}},\n\t\tinitialized:    true,\n\t}\n\tif err := c.setDefaults(); err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn c\n}\n\n// Set takes an option as a string and a value as an interface and\n// attempts to set the appropriate configuration option.\n//\n// It attempts to coerce the value into the right format depending on the named\n// option and the underlying type of the value passed in.\n//\n// Calls to Set() that take a time.Duration as an argument can be input as:\n//\n// \t\"1000ms\" (a string parsed by time.ParseDuration())\n// \t1000 (an integer interpreted as milliseconds)\n// \t1000*time.Millisecond (a literal time.Duration value)\n//\n// Calls to Set() that take bool can be input as:\n//\n// \t\"true\" (a string parsed by strconv.ParseBool())\n// \ttrue (a boolean)\n// \t1 (an int where 1 == true and 0 == false)\n//\n// It returns an error for an invalid option or value.\nfunc (c *Config) Set(option string, value interface{}) error {\n\tc.assertInitialized()\n\toption = strings.Replace(option, \"-\", \"_\", -1)\n\tfor _, h := range c.configHandlers {\n\t\tif h.HandlesOption(c, option) {\n\t\t\treturn h.Set(c, option, value)\n\t\t}\n\t}\n\treturn fmt.Errorf(\"invalid option %s\", option)\n}\n\nfunc (c *Config) assertInitialized() {\n\tif !c.initialized {\n\t\tpanic(\"Config{} must be created with NewConfig()\")\n\t}\n}\n\n// Validate checks that all values are within specified min/max ranges\nfunc (c *Config) Validate() error {\n\tc.assertInitialized()\n\tfor _, h := range c.configHandlers {\n\t\tif err := h.Validate(c); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *Config) setDefaults() error {\n\tfor _, h := range c.configHandlers {\n\t\thh, ok := 
h.(defaultsHandler)\n\t\tif ok {\n\t\t\tif err := hh.SetDefaults(c); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\ntype structTagsConfig struct{}\n\n// Handle options that are listed in StructTags\nfunc (h *structTagsConfig) HandlesOption(c *Config, option string) bool {\n\tval := reflect.ValueOf(c).Elem()\n\ttyp := val.Type()\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tfield := typ.Field(i)\n\t\topt := field.Tag.Get(\"opt\")\n\t\tif opt == option {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// Set values based on parameters in StructTags\nfunc (h *structTagsConfig) Set(c *Config, option string, value interface{}) error {\n\tval := reflect.ValueOf(c).Elem()\n\ttyp := val.Type()\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tfield := typ.Field(i)\n\t\topt := field.Tag.Get(\"opt\")\n\n\t\tif option != opt {\n\t\t\tcontinue\n\t\t}\n\n\t\tmin := field.Tag.Get(\"min\")\n\t\tmax := field.Tag.Get(\"max\")\n\n\t\tfieldVal := val.FieldByName(field.Name)\n\t\tdest := unsafeValueOf(fieldVal)\n\t\tcoercedVal, err := coerce(value, field.Type)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to coerce option %s (%v) - %s\",\n\t\t\t\toption, value, err)\n\t\t}\n\t\tif min != \"\" {\n\t\t\tcoercedMinVal, _ := coerce(min, field.Type)\n\t\t\tif valueCompare(coercedVal, coercedMinVal) == -1 {\n\t\t\t\treturn fmt.Errorf(\"invalid %s ! %v < %v\",\n\t\t\t\t\toption, coercedVal.Interface(), coercedMinVal.Interface())\n\t\t\t}\n\t\t}\n\t\tif max != \"\" {\n\t\t\tcoercedMaxVal, _ := coerce(max, field.Type)\n\t\t\tif valueCompare(coercedVal, coercedMaxVal) == 1 {\n\t\t\t\treturn fmt.Errorf(\"invalid %s ! 
%v > %v\",\n\t\t\t\t\toption, coercedVal.Interface(), coercedMaxVal.Interface())\n\t\t\t}\n\t\t}\n\t\tif coercedVal.Type().String() == \"nsq.BackoffStrategy\" {\n\t\t\tv := coercedVal.Interface().(BackoffStrategy)\n\t\t\tif v, ok := v.(interface {\n\t\t\t\tsetConfig(*Config)\n\t\t\t}); ok {\n\t\t\t\tv.setConfig(c)\n\t\t\t}\n\t\t}\n\t\tdest.Set(coercedVal)\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"unknown option %s\", option)\n}\n\nfunc (h *structTagsConfig) SetDefaults(c *Config) error {\n\tval := reflect.ValueOf(c).Elem()\n\ttyp := val.Type()\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tfield := typ.Field(i)\n\t\topt := field.Tag.Get(\"opt\")\n\t\tdefaultVal := field.Tag.Get(\"default\")\n\t\tif defaultVal == \"\" || opt == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := c.Set(opt, defaultVal); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\tlog.Fatalf(\"ERROR: unable to get hostname %s\", err.Error())\n\t}\n\n\tc.ClientID = strings.Split(hostname, \".\")[0]\n\tc.Hostname = hostname\n\tc.UserAgent = fmt.Sprintf(\"go-nsq/%s\", VERSION)\n\treturn nil\n}\n\nfunc (h *structTagsConfig) Validate(c *Config) error {\n\tval := reflect.ValueOf(c).Elem()\n\ttyp := val.Type()\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tfield := typ.Field(i)\n\n\t\tmin := field.Tag.Get(\"min\")\n\t\tmax := field.Tag.Get(\"max\")\n\n\t\tif min == \"\" && max == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tvalue := val.FieldByName(field.Name)\n\n\t\tif min != \"\" {\n\t\t\tcoercedMinVal, _ := coerce(min, field.Type)\n\t\t\tif valueCompare(value, coercedMinVal) == -1 {\n\t\t\t\treturn fmt.Errorf(\"invalid %s ! %v < %v\",\n\t\t\t\t\tfield.Name, value.Interface(), coercedMinVal.Interface())\n\t\t\t}\n\t\t}\n\t\tif max != \"\" {\n\t\t\tcoercedMaxVal, _ := coerce(max, field.Type)\n\t\t\tif valueCompare(value, coercedMaxVal) == 1 {\n\t\t\t\treturn fmt.Errorf(\"invalid %s ! 
%v > %v\",\n\t\t\t\t\tfield.Name, value.Interface(), coercedMaxVal.Interface())\n\t\t\t}\n\t\t}\n\t}\n\n\tif c.HeartbeatInterval > c.ReadTimeout {\n\t\treturn fmt.Errorf(\"HeartbeatInterval %v must be less than ReadTimeout %v\", c.HeartbeatInterval, c.ReadTimeout)\n\t}\n\n\treturn nil\n}\n\n// Parsing for higher order TLS settings\ntype tlsConfig struct {\n\tcertFile string\n\tkeyFile  string\n}\n\nfunc (t *tlsConfig) HandlesOption(c *Config, option string) bool {\n\tswitch option {\n\tcase \"tls_root_ca_file\", \"tls_insecure_skip_verify\", \"tls_cert\", \"tls_key\", \"tls_min_version\":\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (t *tlsConfig) Set(c *Config, option string, value interface{}) error {\n\tif c.TlsConfig == nil {\n\t\tc.TlsConfig = &tls.Config{\n\t\t\tMinVersion: tls.VersionTLS10,\n\t\t\tMaxVersion: tls.VersionTLS12, // enable TLS_FALLBACK_SCSV prior to Go 1.5: https://go-review.googlesource.com/#/c/1776/\n\t\t}\n\t}\n\tval := reflect.ValueOf(c.TlsConfig).Elem()\n\n\tswitch option {\n\tcase \"tls_cert\", \"tls_key\":\n\t\tif option == \"tls_cert\" {\n\t\t\tt.certFile = value.(string)\n\t\t} else {\n\t\t\tt.keyFile = value.(string)\n\t\t}\n\t\tif t.certFile != \"\" && t.keyFile != \"\" && len(c.TlsConfig.Certificates) == 0 {\n\t\t\tcert, err := tls.LoadX509KeyPair(t.certFile, t.keyFile)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tc.TlsConfig.Certificates = []tls.Certificate{cert}\n\t\t}\n\t\treturn nil\n\tcase \"tls_root_ca_file\":\n\t\tfilename, ok := value.(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"ERROR: %v is not a string\", value)\n\t\t}\n\t\ttlsCertPool := x509.NewCertPool()\n\t\tcaCertFile, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"ERROR: failed to read custom Certificate Authority file %s\", err)\n\t\t}\n\t\tif !tlsCertPool.AppendCertsFromPEM(caCertFile) {\n\t\t\treturn fmt.Errorf(\"ERROR: failed to append certificates from Certificate Authority 
file\")\n\t\t}\n\t\tc.TlsConfig.RootCAs = tlsCertPool\n\t\treturn nil\n\tcase \"tls_insecure_skip_verify\":\n\t\tfieldVal := val.FieldByName(\"InsecureSkipVerify\")\n\t\tdest := unsafeValueOf(fieldVal)\n\t\tcoercedVal, err := coerce(value, fieldVal.Type())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to coerce option %s (%v) - %s\",\n\t\t\t\toption, value, err)\n\t\t}\n\t\tdest.Set(coercedVal)\n\t\treturn nil\n\tcase \"tls_min_version\":\n\t\tversion, ok := value.(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"ERROR: %v is not a string\", value)\n\t\t}\n\t\tswitch version {\n\t\tcase \"ssl3.0\":\n\t\t\tc.TlsConfig.MinVersion = tls.VersionSSL30\n\t\tcase \"tls1.0\":\n\t\t\tc.TlsConfig.MinVersion = tls.VersionTLS10\n\t\tcase \"tls1.1\":\n\t\t\tc.TlsConfig.MinVersion = tls.VersionTLS11\n\t\tcase \"tls1.2\":\n\t\t\tc.TlsConfig.MinVersion = tls.VersionTLS12\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"ERROR: %v is not a tls version\", value)\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"unknown option %s\", option)\n}\n\nfunc (t *tlsConfig) Validate(c *Config) error {\n\treturn nil\n}\n\n// because Config contains private structs we can't use reflect.Value\n// directly, instead we need to \"unsafely\" address the variable\nfunc unsafeValueOf(val reflect.Value) reflect.Value {\n\tuptr := unsafe.Pointer(val.UnsafeAddr())\n\treturn reflect.NewAt(val.Type(), uptr).Elem()\n}\n\nfunc valueCompare(v1 reflect.Value, v2 reflect.Value) int {\n\tswitch v1.Type().String() {\n\tcase \"int\", \"int16\", \"int32\", \"int64\":\n\t\tif v1.Int() > v2.Int() {\n\t\t\treturn 1\n\t\t} else if v1.Int() < v2.Int() {\n\t\t\treturn -1\n\t\t}\n\t\treturn 0\n\tcase \"uint\", \"uint16\", \"uint32\", \"uint64\":\n\t\tif v1.Uint() > v2.Uint() {\n\t\t\treturn 1\n\t\t} else if v1.Uint() < v2.Uint() {\n\t\t\treturn -1\n\t\t}\n\t\treturn 0\n\tcase \"float32\", \"float64\":\n\t\tif v1.Float() > v2.Float() {\n\t\t\treturn 1\n\t\t} else if v1.Float() < v2.Float() {\n\t\t\treturn 
-1\n\t\t}\n\t\treturn 0\n\tcase \"time.Duration\":\n\t\tif v1.Interface().(time.Duration) > v2.Interface().(time.Duration) {\n\t\t\treturn 1\n\t\t} else if v1.Interface().(time.Duration) < v2.Interface().(time.Duration) {\n\t\t\treturn -1\n\t\t}\n\t\treturn 0\n\t}\n\tpanic(\"impossible\")\n}\n\nfunc coerce(v interface{}, typ reflect.Type) (reflect.Value, error) {\n\tvar err error\n\tif typ.Kind() == reflect.Ptr {\n\t\treturn reflect.ValueOf(v), nil\n\t}\n\tswitch typ.String() {\n\tcase \"string\":\n\t\tv, err = coerceString(v)\n\tcase \"int\", \"int16\", \"int32\", \"int64\":\n\t\tv, err = coerceInt64(v)\n\tcase \"uint\", \"uint16\", \"uint32\", \"uint64\":\n\t\tv, err = coerceUint64(v)\n\tcase \"float32\", \"float64\":\n\t\tv, err = coerceFloat64(v)\n\tcase \"bool\":\n\t\tv, err = coerceBool(v)\n\tcase \"time.Duration\":\n\t\tv, err = coerceDuration(v)\n\tcase \"net.Addr\":\n\t\tv, err = coerceAddr(v)\n\tcase \"nsq.BackoffStrategy\":\n\t\tv, err = coerceBackoffStrategy(v)\n\tdefault:\n\t\tv = nil\n\t\terr = fmt.Errorf(\"invalid type %s\", typ.String())\n\t}\n\treturn valueTypeCoerce(v, typ), err\n}\n\nfunc valueTypeCoerce(v interface{}, typ reflect.Type) reflect.Value {\n\tval := reflect.ValueOf(v)\n\tif reflect.TypeOf(v) == typ {\n\t\treturn val\n\t}\n\ttval := reflect.New(typ).Elem()\n\tswitch typ.String() {\n\tcase \"int\", \"int16\", \"int32\", \"int64\":\n\t\ttval.SetInt(val.Int())\n\tcase \"uint\", \"uint16\", \"uint32\", \"uint64\":\n\t\ttval.SetUint(val.Uint())\n\tcase \"float32\", \"float64\":\n\t\ttval.SetFloat(val.Float())\n\tdefault:\n\t\ttval.Set(val)\n\t}\n\treturn tval\n}\n\nfunc coerceString(v interface{}) (string, error) {\n\tswitch v := v.(type) {\n\tcase string:\n\t\treturn v, nil\n\tcase int, int16, int32, int64, uint, uint16, uint32, uint64:\n\t\treturn fmt.Sprintf(\"%d\", v), nil\n\tcase float32, float64:\n\t\treturn fmt.Sprintf(\"%f\", v), nil\n\t}\n\treturn fmt.Sprintf(\"%s\", v), nil\n}\n\nfunc coerceDuration(v interface{}) (time.Duration, 
error) {\n\tswitch v := v.(type) {\n\tcase string:\n\t\treturn time.ParseDuration(v)\n\tcase int, int16, int32, int64:\n\t\t// treat like ms\n\t\treturn time.Duration(reflect.ValueOf(v).Int()) * time.Millisecond, nil\n\tcase uint, uint16, uint32, uint64:\n\t\t// treat like ms\n\t\treturn time.Duration(reflect.ValueOf(v).Uint()) * time.Millisecond, nil\n\tcase time.Duration:\n\t\treturn v, nil\n\t}\n\treturn 0, errors.New(\"invalid value type\")\n}\n\nfunc coerceAddr(v interface{}) (net.Addr, error) {\n\tswitch v := v.(type) {\n\tcase string:\n\t\treturn net.ResolveTCPAddr(\"tcp\", v)\n\tcase net.Addr:\n\t\treturn v, nil\n\t}\n\treturn nil, errors.New(\"invalid value type\")\n}\n\nfunc coerceBackoffStrategy(v interface{}) (BackoffStrategy, error) {\n\tswitch v := v.(type) {\n\tcase string:\n\t\tswitch v {\n\t\tcase \"\", \"exponential\":\n\t\t\treturn &ExponentialStrategy{}, nil\n\t\tcase \"full_jitter\":\n\t\t\treturn &FullJitterStrategy{}, nil\n\t\t}\n\tcase BackoffStrategy:\n\t\treturn v, nil\n\t}\n\treturn nil, errors.New(\"invalid value type\")\n}\n\nfunc coerceBool(v interface{}) (bool, error) {\n\tswitch v := v.(type) {\n\tcase bool:\n\t\treturn v, nil\n\tcase string:\n\t\treturn strconv.ParseBool(v)\n\tcase int, int16, int32, int64:\n\t\treturn reflect.ValueOf(v).Int() != 0, nil\n\tcase uint, uint16, uint32, uint64:\n\t\treturn reflect.ValueOf(v).Uint() != 0, nil\n\t}\n\treturn false, errors.New(\"invalid value type\")\n}\n\nfunc coerceFloat64(v interface{}) (float64, error) {\n\tswitch v := v.(type) {\n\tcase string:\n\t\treturn strconv.ParseFloat(v, 64)\n\tcase int, int16, int32, int64:\n\t\treturn float64(reflect.ValueOf(v).Int()), nil\n\tcase uint, uint16, uint32, uint64:\n\t\treturn float64(reflect.ValueOf(v).Uint()), nil\n\tcase float32:\n\t\treturn float64(v), nil\n\tcase float64:\n\t\treturn v, nil\n\t}\n\treturn 0, errors.New(\"invalid value type\")\n}\n\nfunc coerceInt64(v interface{}) (int64, error) {\n\tswitch v := v.(type) {\n\tcase 
string:\n\t\treturn strconv.ParseInt(v, 10, 64)\n\tcase int, int16, int32, int64:\n\t\treturn reflect.ValueOf(v).Int(), nil\n\tcase uint, uint16, uint32, uint64:\n\t\treturn int64(reflect.ValueOf(v).Uint()), nil\n\t}\n\treturn 0, errors.New(\"invalid value type\")\n}\n\nfunc coerceUint64(v interface{}) (uint64, error) {\n\tswitch v := v.(type) {\n\tcase string:\n\t\treturn strconv.ParseUint(v, 10, 64)\n\tcase int, int16, int32, int64:\n\t\treturn uint64(reflect.ValueOf(v).Int()), nil\n\tcase uint, uint16, uint32, uint64:\n\t\treturn reflect.ValueOf(v).Uint(), nil\n\t}\n\treturn 0, errors.New(\"invalid value type\")\n}\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/config_test.go",
    "content": "package nsq\n\nimport (\n\t\"math/rand\"\n\t\"net\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestConfigSet(t *testing.T) {\n\tc := NewConfig()\n\tif err := c.Set(\"not a real config value\", struct{}{}); err == nil {\n\t\tt.Error(\"No error when setting an invalid value\")\n\t}\n\tif err := c.Set(\"tls_v1\", \"lol\"); err == nil {\n\t\tt.Error(\"No error when setting `tls_v1` to an invalid value\")\n\t}\n\tif err := c.Set(\"tls_v1\", true); err != nil {\n\t\tt.Errorf(\"Error setting `tls_v1` config. %s\", err)\n\t}\n\n\tif err := c.Set(\"tls-insecure-skip-verify\", true); err != nil {\n\t\tt.Errorf(\"Error setting `tls-insecure-skip-verify` config. %v\", err)\n\t}\n\tif c.TlsConfig.InsecureSkipVerify != true {\n\t\tt.Errorf(\"Error setting `tls-insecure-skip-verify` config: %v\", c.TlsConfig)\n\t}\n\tif err := c.Set(\"tls-min-version\", \"tls1.2\"); err != nil {\n\t\tt.Errorf(\"Error setting `tls-min-version` config: %s\", err)\n\t}\n\tif err := c.Set(\"tls-min-version\", \"tls1.3\"); err == nil {\n\t\tt.Error(\"No error when setting `tls-min-version` to an invalid value\")\n\t}\n\tif err := c.Set(\"local_addr\", &net.TCPAddr{}); err != nil {\n\t\tt.Errorf(\"Error setting `local_addr` config: %s\", err)\n\t}\n\tif err := c.Set(\"local_addr\", \"1.2.3.4:27015\"); err != nil {\n\t\tt.Errorf(\"Error setting `local_addr` config: %s\", err)\n\t}\n\tif err := c.Set(\"dial_timeout\", \"5s\"); err != nil {\n\t\tt.Errorf(\"Error setting `dial_timeout` config: %s\", err)\n\t}\n\tif c.LocalAddr.String() != \"1.2.3.4:27015\" {\n\t\tt.Error(\"Failed to assign `local_addr` config\")\n\t}\n\tif reflect.ValueOf(c.BackoffStrategy).Type().String() != \"*nsq.ExponentialStrategy\" {\n\t\tt.Error(\"Failed to set default `exponential` backoff strategy\")\n\t}\n\tif err := c.Set(\"backoff_strategy\", \"full_jitter\"); err != nil {\n\t\tt.Errorf(\"Failed to assign `backoff_strategy` config: %v\", err)\n\t}\n\tif 
reflect.ValueOf(c.BackoffStrategy).Type().String() != \"*nsq.FullJitterStrategy\" {\n\t\tt.Error(\"Failed to set `full_jitter` backoff strategy\")\n\t}\n}\n\nfunc TestConfigValidate(t *testing.T) {\n\tc := NewConfig()\n\tif err := c.Validate(); err != nil {\n\t\tt.Error(\"initialized config is invalid\")\n\t}\n\tc.DeflateLevel = 100\n\tif err := c.Validate(); err == nil {\n\t\tt.Error(\"no error set for invalid value\")\n\t}\n}\n\nfunc TestExponentialBackoff(t *testing.T) {\n\texpected := []time.Duration{\n\t\t1 * time.Second,\n\t\t2 * time.Second,\n\t\t8 * time.Second,\n\t\t32 * time.Second,\n\t}\n\tbackoffTest(t, expected, func(c *Config) BackoffStrategy {\n\t\treturn &ExponentialStrategy{cfg: c}\n\t})\n}\n\nfunc TestFullJitterBackoff(t *testing.T) {\n\texpected := []time.Duration{\n\t\t566028617 * time.Nanosecond,\n\t\t1365407263 * time.Nanosecond,\n\t\t5232470547 * time.Nanosecond,\n\t\t21467499218 * time.Nanosecond,\n\t}\n\tbackoffTest(t, expected, func(c *Config) BackoffStrategy {\n\t\treturn &FullJitterStrategy{cfg: c, rng: rand.New(rand.NewSource(99))}\n\t})\n}\n\nfunc backoffTest(t *testing.T, expected []time.Duration, cb func(c *Config) BackoffStrategy) {\n\tconfig := NewConfig()\n\tattempts := []int{0, 1, 3, 5}\n\ts := cb(config)\n\tfor i := range attempts {\n\t\tresult := s.Calculate(attempts[i])\n\t\tif result != expected[i] {\n\t\t\tt.Fatalf(\"wrong backoff duration %v for attempt %d (should be %v)\",\n\t\t\t\tresult, attempts[i], expected[i])\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/conn.go",
    "content": "package nsq\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"compress/flate\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/mreiferson/go-snappystream\"\n)\n\n// IdentifyResponse represents the metadata\n// returned from an IDENTIFY command to nsqd\ntype IdentifyResponse struct {\n\tMaxRdyCount  int64 `json:\"max_rdy_count\"`\n\tTLSv1        bool  `json:\"tls_v1\"`\n\tDeflate      bool  `json:\"deflate\"`\n\tSnappy       bool  `json:\"snappy\"`\n\tAuthRequired bool  `json:\"auth_required\"`\n}\n\n// AuthResponse represents the metadata\n// returned from an AUTH command to nsqd\ntype AuthResponse struct {\n\tIdentity        string `json:\"identity\"`\n\tIdentityUrl     string `json:\"identity_url\"`\n\tPermissionCount int64  `json:\"permission_count\"`\n}\n\ntype msgResponse struct {\n\tmsg     *Message\n\tcmd     *Command\n\tsuccess bool\n\tbackoff bool\n}\n\n// Conn represents a connection to nsqd\n//\n// Conn exposes a set of callbacks for the\n// various events that occur on a connection\ntype Conn struct {\n\t// 64bit atomic vars need to be first for proper alignment on 32bit platforms\n\tmessagesInFlight int64\n\tmaxRdyCount      int64\n\trdyCount         int64\n\tlastRdyCount     int64\n\tlastMsgTimestamp int64\n\n\tmtx sync.Mutex\n\n\tconfig *Config\n\n\tconn    *net.TCPConn\n\ttlsConn *tls.Conn\n\taddr    string\n\n\tdelegate ConnDelegate\n\n\tlogger   logger\n\tlogLvl   LogLevel\n\tlogFmt   string\n\tlogGuard sync.RWMutex\n\n\tr io.Reader\n\tw io.Writer\n\n\tcmdChan         chan *Command\n\tmsgResponseChan chan *msgResponse\n\texitChan        chan int\n\tdrainReady      chan int\n\n\tcloseFlag int32\n\tstopper   sync.Once\n\twg        sync.WaitGroup\n\n\treadLoopRunning int32\n}\n\n// NewConn returns a new Conn instance\nfunc NewConn(addr string, config *Config, delegate ConnDelegate) *Conn {\n\tif !config.initialized 
{\n\t\tpanic(\"Config must be created with NewConfig()\")\n\t}\n\treturn &Conn{\n\t\taddr: addr,\n\n\t\tconfig:   config,\n\t\tdelegate: delegate,\n\n\t\tmaxRdyCount:      2500,\n\t\tlastMsgTimestamp: time.Now().UnixNano(),\n\n\t\tcmdChan:         make(chan *Command),\n\t\tmsgResponseChan: make(chan *msgResponse),\n\t\texitChan:        make(chan int),\n\t\tdrainReady:      make(chan int),\n\t}\n}\n\n// SetLogger assigns the logger to use as well as a level.\n//\n// The format parameter is expected to be a printf compatible string with\n// a single %s argument.  This is useful if you want to provide additional\n// context to the log messages that the connection will print, the default\n// is '(%s)'.\n//\n// The logger parameter is an interface that requires the following\n// method to be implemented (such as the the stdlib log.Logger):\n//\n//    Output(calldepth int, s string)\n//\nfunc (c *Conn) SetLogger(l logger, lvl LogLevel, format string) {\n\tc.logGuard.Lock()\n\tdefer c.logGuard.Unlock()\n\n\tc.logger = l\n\tc.logLvl = lvl\n\tc.logFmt = format\n\tif c.logFmt == \"\" {\n\t\tc.logFmt = \"(%s)\"\n\t}\n}\n\nfunc (c *Conn) getLogger() (logger, LogLevel, string) {\n\tc.logGuard.RLock()\n\tdefer c.logGuard.RUnlock()\n\n\treturn c.logger, c.logLvl, c.logFmt\n}\n\n// Connect dials and bootstraps the nsqd connection\n// (including IDENTIFY) and returns the IdentifyResponse\nfunc (c *Conn) Connect() (*IdentifyResponse, error) {\n\tdialer := &net.Dialer{\n\t\tLocalAddr: c.config.LocalAddr,\n\t\tTimeout:   c.config.DialTimeout,\n\t}\n\n\tconn, err := dialer.Dial(\"tcp\", c.addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.conn = conn.(*net.TCPConn)\n\tc.r = conn\n\tc.w = conn\n\n\t_, err = c.Write(MagicV2)\n\tif err != nil {\n\t\tc.Close()\n\t\treturn nil, fmt.Errorf(\"[%s] failed to write magic - %s\", c.addr, err)\n\t}\n\n\tresp, err := c.identify()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp != nil && resp.AuthRequired {\n\t\tif 
c.config.AuthSecret == \"\" {\n\t\t\tc.log(LogLevelError, \"Auth Required\")\n\t\t\treturn nil, errors.New(\"Auth Required\")\n\t\t}\n\t\terr := c.auth(c.config.AuthSecret)\n\t\tif err != nil {\n\t\t\tc.log(LogLevelError, \"Auth Failed %s\", err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tc.wg.Add(2)\n\tatomic.StoreInt32(&c.readLoopRunning, 1)\n\tgo c.readLoop()\n\tgo c.writeLoop()\n\treturn resp, nil\n}\n\n// Close idempotently initiates connection close\nfunc (c *Conn) Close() error {\n\tatomic.StoreInt32(&c.closeFlag, 1)\n\tif c.conn != nil && atomic.LoadInt64(&c.messagesInFlight) == 0 {\n\t\treturn c.conn.CloseRead()\n\t}\n\treturn nil\n}\n\n// IsClosing indicates whether or not the\n// connection is currently in the processing of\n// gracefully closing\nfunc (c *Conn) IsClosing() bool {\n\treturn atomic.LoadInt32(&c.closeFlag) == 1\n}\n\n// RDY returns the current RDY count\nfunc (c *Conn) RDY() int64 {\n\treturn atomic.LoadInt64(&c.rdyCount)\n}\n\n// LastRDY returns the previously set RDY count\nfunc (c *Conn) LastRDY() int64 {\n\treturn atomic.LoadInt64(&c.lastRdyCount)\n}\n\n// SetRDY stores the specified RDY count\nfunc (c *Conn) SetRDY(rdy int64) {\n\tatomic.StoreInt64(&c.rdyCount, rdy)\n\tatomic.StoreInt64(&c.lastRdyCount, rdy)\n}\n\n// MaxRDY returns the nsqd negotiated maximum\n// RDY count that it will accept for this connection\nfunc (c *Conn) MaxRDY() int64 {\n\treturn c.maxRdyCount\n}\n\n// LastMessageTime returns a time.Time representing\n// the time at which the last message was received\nfunc (c *Conn) LastMessageTime() time.Time {\n\treturn time.Unix(0, atomic.LoadInt64(&c.lastMsgTimestamp))\n}\n\n// RemoteAddr returns the configured destination nsqd address\nfunc (c *Conn) RemoteAddr() net.Addr {\n\treturn c.conn.RemoteAddr()\n}\n\n// String returns the fully-qualified address\nfunc (c *Conn) String() string {\n\treturn c.addr\n}\n\n// Read performs a deadlined read on the underlying TCP connection\nfunc (c *Conn) Read(p []byte) (int, error) 
{\n\tc.conn.SetReadDeadline(time.Now().Add(c.config.ReadTimeout))\n\treturn c.r.Read(p)\n}\n\n// Write performs a deadlined write on the underlying TCP connection\nfunc (c *Conn) Write(p []byte) (int, error) {\n\tc.conn.SetWriteDeadline(time.Now().Add(c.config.WriteTimeout))\n\treturn c.w.Write(p)\n}\n\n// WriteCommand is a goroutine safe method to write a Command\n// to this connection, and flush.\nfunc (c *Conn) WriteCommand(cmd *Command) error {\n\tc.mtx.Lock()\n\n\t_, err := cmd.WriteTo(c)\n\tif err != nil {\n\t\tgoto exit\n\t}\n\terr = c.Flush()\n\nexit:\n\tc.mtx.Unlock()\n\tif err != nil {\n\t\tc.log(LogLevelError, \"IO error - %s\", err)\n\t\tc.delegate.OnIOError(c, err)\n\t}\n\treturn err\n}\n\ntype flusher interface {\n\tFlush() error\n}\n\n// Flush writes all buffered data to the underlying TCP connection\nfunc (c *Conn) Flush() error {\n\tif f, ok := c.w.(flusher); ok {\n\t\treturn f.Flush()\n\t}\n\treturn nil\n}\n\nfunc (c *Conn) identify() (*IdentifyResponse, error) {\n\tci := make(map[string]interface{})\n\tci[\"client_id\"] = c.config.ClientID\n\tci[\"hostname\"] = c.config.Hostname\n\tci[\"user_agent\"] = c.config.UserAgent\n\tci[\"short_id\"] = c.config.ClientID // deprecated\n\tci[\"long_id\"] = c.config.Hostname  // deprecated\n\tci[\"tls_v1\"] = c.config.TlsV1\n\tci[\"deflate\"] = c.config.Deflate\n\tci[\"deflate_level\"] = c.config.DeflateLevel\n\tci[\"snappy\"] = c.config.Snappy\n\tci[\"feature_negotiation\"] = true\n\tif c.config.HeartbeatInterval == -1 {\n\t\tci[\"heartbeat_interval\"] = -1\n\t} else {\n\t\tci[\"heartbeat_interval\"] = int64(c.config.HeartbeatInterval / time.Millisecond)\n\t}\n\tci[\"sample_rate\"] = c.config.SampleRate\n\tci[\"output_buffer_size\"] = c.config.OutputBufferSize\n\tif c.config.OutputBufferTimeout == -1 {\n\t\tci[\"output_buffer_timeout\"] = -1\n\t} else {\n\t\tci[\"output_buffer_timeout\"] = int64(c.config.OutputBufferTimeout / time.Millisecond)\n\t}\n\tci[\"msg_timeout\"] = int64(c.config.MsgTimeout / 
time.Millisecond)\n\tcmd, err := Identify(ci)\n\tif err != nil {\n\t\treturn nil, ErrIdentify{err.Error()}\n\t}\n\n\terr = c.WriteCommand(cmd)\n\tif err != nil {\n\t\treturn nil, ErrIdentify{err.Error()}\n\t}\n\n\tframeType, data, err := ReadUnpackedResponse(c)\n\tif err != nil {\n\t\treturn nil, ErrIdentify{err.Error()}\n\t}\n\n\tif frameType == FrameTypeError {\n\t\treturn nil, ErrIdentify{string(data)}\n\t}\n\n\t// check to see if the server was able to respond w/ capabilities\n\t// i.e. it was a JSON response\n\tif data[0] != '{' {\n\t\treturn nil, nil\n\t}\n\n\tresp := &IdentifyResponse{}\n\terr = json.Unmarshal(data, resp)\n\tif err != nil {\n\t\treturn nil, ErrIdentify{err.Error()}\n\t}\n\n\tc.log(LogLevelDebug, \"IDENTIFY response: %+v\", resp)\n\n\tc.maxRdyCount = resp.MaxRdyCount\n\n\tif resp.TLSv1 {\n\t\tc.log(LogLevelInfo, \"upgrading to TLS\")\n\t\terr := c.upgradeTLS(c.config.TlsConfig)\n\t\tif err != nil {\n\t\t\treturn nil, ErrIdentify{err.Error()}\n\t\t}\n\t}\n\n\tif resp.Deflate {\n\t\tc.log(LogLevelInfo, \"upgrading to Deflate\")\n\t\terr := c.upgradeDeflate(c.config.DeflateLevel)\n\t\tif err != nil {\n\t\t\treturn nil, ErrIdentify{err.Error()}\n\t\t}\n\t}\n\n\tif resp.Snappy {\n\t\tc.log(LogLevelInfo, \"upgrading to Snappy\")\n\t\terr := c.upgradeSnappy()\n\t\tif err != nil {\n\t\t\treturn nil, ErrIdentify{err.Error()}\n\t\t}\n\t}\n\n\t// now that connection is bootstrapped, enable read buffering\n\t// (and write buffering if it's not already capable of Flush())\n\tc.r = bufio.NewReader(c.r)\n\tif _, ok := c.w.(flusher); !ok {\n\t\tc.w = bufio.NewWriter(c.w)\n\t}\n\n\treturn resp, nil\n}\n\nfunc (c *Conn) upgradeTLS(tlsConf *tls.Config) error {\n\t// create a local copy of the config to set ServerName for this connection\n\tvar conf tls.Config\n\tif tlsConf != nil {\n\t\tconf = *tlsConf\n\t}\n\thost, _, err := net.SplitHostPort(c.addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tconf.ServerName = host\n\n\tc.tlsConn = tls.Client(c.conn, 
&conf)\n\terr = c.tlsConn.Handshake()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.r = c.tlsConn\n\tc.w = c.tlsConn\n\tframeType, data, err := ReadUnpackedResponse(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif frameType != FrameTypeResponse || !bytes.Equal(data, []byte(\"OK\")) {\n\t\treturn errors.New(\"invalid response from TLS upgrade\")\n\t}\n\treturn nil\n}\n\nfunc (c *Conn) upgradeDeflate(level int) error {\n\tconn := net.Conn(c.conn)\n\tif c.tlsConn != nil {\n\t\tconn = c.tlsConn\n\t}\n\tfw, _ := flate.NewWriter(conn, level)\n\tc.r = flate.NewReader(conn)\n\tc.w = fw\n\tframeType, data, err := ReadUnpackedResponse(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif frameType != FrameTypeResponse || !bytes.Equal(data, []byte(\"OK\")) {\n\t\treturn errors.New(\"invalid response from Deflate upgrade\")\n\t}\n\treturn nil\n}\n\nfunc (c *Conn) upgradeSnappy() error {\n\tconn := net.Conn(c.conn)\n\tif c.tlsConn != nil {\n\t\tconn = c.tlsConn\n\t}\n\tc.r = snappystream.NewReader(conn, snappystream.SkipVerifyChecksum)\n\tc.w = snappystream.NewWriter(conn)\n\tframeType, data, err := ReadUnpackedResponse(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif frameType != FrameTypeResponse || !bytes.Equal(data, []byte(\"OK\")) {\n\t\treturn errors.New(\"invalid response from Snappy upgrade\")\n\t}\n\treturn nil\n}\n\nfunc (c *Conn) auth(secret string) error {\n\tcmd, err := Auth(secret)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = c.WriteCommand(cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tframeType, data, err := ReadUnpackedResponse(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif frameType == FrameTypeError {\n\t\treturn errors.New(\"Error authenticating \" + string(data))\n\t}\n\n\tresp := &AuthResponse{}\n\terr = json.Unmarshal(data, resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.log(LogLevelInfo, \"Auth accepted. 
Identity: %q %s Permissions: %d\",\n\t\tresp.Identity, resp.IdentityUrl, resp.PermissionCount)\n\n\treturn nil\n}\n\nfunc (c *Conn) readLoop() {\n\tdelegate := &connMessageDelegate{c}\n\tfor {\n\t\tif atomic.LoadInt32(&c.closeFlag) == 1 {\n\t\t\tgoto exit\n\t\t}\n\n\t\tframeType, data, err := ReadUnpackedResponse(c)\n\t\tif err != nil {\n\t\t\tif !strings.Contains(err.Error(), \"use of closed network connection\") {\n\t\t\t\tc.log(LogLevelError, \"IO error - %s\", err)\n\t\t\t\tc.delegate.OnIOError(c, err)\n\t\t\t}\n\t\t\tgoto exit\n\t\t}\n\n\t\tif frameType == FrameTypeResponse && bytes.Equal(data, []byte(\"_heartbeat_\")) {\n\t\t\tc.log(LogLevelDebug, \"heartbeat received\")\n\t\t\tc.delegate.OnHeartbeat(c)\n\t\t\terr := c.WriteCommand(Nop())\n\t\t\tif err != nil {\n\t\t\t\tc.log(LogLevelError, \"IO error - %s\", err)\n\t\t\t\tc.delegate.OnIOError(c, err)\n\t\t\t\tgoto exit\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch frameType {\n\t\tcase FrameTypeResponse:\n\t\t\tc.delegate.OnResponse(c, data)\n\t\tcase FrameTypeMessage:\n\t\t\tmsg, err := DecodeMessage(data)\n\t\t\tif err != nil {\n\t\t\t\tc.log(LogLevelError, \"IO error - %s\", err)\n\t\t\t\tc.delegate.OnIOError(c, err)\n\t\t\t\tgoto exit\n\t\t\t}\n\t\t\tmsg.Delegate = delegate\n\t\t\tmsg.NSQDAddress = c.String()\n\n\t\t\tatomic.AddInt64(&c.rdyCount, -1)\n\t\t\tatomic.AddInt64(&c.messagesInFlight, 1)\n\t\t\tatomic.StoreInt64(&c.lastMsgTimestamp, time.Now().UnixNano())\n\n\t\t\tc.delegate.OnMessage(c, msg)\n\t\tcase FrameTypeError:\n\t\t\tc.log(LogLevelError, \"protocol error - %s\", data)\n\t\t\tc.delegate.OnError(c, data)\n\t\tdefault:\n\t\t\tc.log(LogLevelError, \"IO error - %s\", err)\n\t\t\tc.delegate.OnIOError(c, fmt.Errorf(\"unknown frame type %d\", frameType))\n\t\t}\n\t}\n\nexit:\n\tatomic.StoreInt32(&c.readLoopRunning, 0)\n\t// start the connection close\n\tmessagesInFlight := atomic.LoadInt64(&c.messagesInFlight)\n\tif messagesInFlight == 0 {\n\t\t// if we exited readLoop with no messages in 
flight\n\t\t// we need to explicitly trigger the close because\n\t\t// writeLoop won't\n\t\tc.close()\n\t} else {\n\t\tc.log(LogLevelWarning, \"delaying close, %d outstanding messages\", messagesInFlight)\n\t}\n\tc.wg.Done()\n\tc.log(LogLevelInfo, \"readLoop exiting\")\n}\n\nfunc (c *Conn) writeLoop() {\n\tfor {\n\t\tselect {\n\t\tcase <-c.exitChan:\n\t\t\tc.log(LogLevelInfo, \"breaking out of writeLoop\")\n\t\t\t// Indicate drainReady because we will not pull any more off msgResponseChan\n\t\t\tclose(c.drainReady)\n\t\t\tgoto exit\n\t\tcase cmd := <-c.cmdChan:\n\t\t\terr := c.WriteCommand(cmd)\n\t\t\tif err != nil {\n\t\t\t\tc.log(LogLevelError, \"error sending command %s - %s\", cmd, err)\n\t\t\t\tc.close()\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase resp := <-c.msgResponseChan:\n\t\t\t// Decrement this here so it is correct even if we can't respond to nsqd\n\t\t\tmsgsInFlight := atomic.AddInt64(&c.messagesInFlight, -1)\n\n\t\t\tif resp.success {\n\t\t\t\tc.log(LogLevelDebug, \"FIN %s\", resp.msg.ID)\n\t\t\t\tc.delegate.OnMessageFinished(c, resp.msg)\n\t\t\t\tc.delegate.OnResume(c)\n\t\t\t} else {\n\t\t\t\tc.log(LogLevelDebug, \"REQ %s\", resp.msg.ID)\n\t\t\t\tc.delegate.OnMessageRequeued(c, resp.msg)\n\t\t\t\tif resp.backoff {\n\t\t\t\t\tc.delegate.OnBackoff(c)\n\t\t\t\t} else {\n\t\t\t\t\tc.delegate.OnContinue(c)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr := c.WriteCommand(resp.cmd)\n\t\t\tif err != nil {\n\t\t\t\tc.log(LogLevelError, \"error sending command %s - %s\", resp.cmd, err)\n\t\t\t\tc.close()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif msgsInFlight == 0 &&\n\t\t\t\tatomic.LoadInt32(&c.closeFlag) == 1 {\n\t\t\t\tc.close()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\nexit:\n\tc.wg.Done()\n\tc.log(LogLevelInfo, \"writeLoop exiting\")\n}\n\nfunc (c *Conn) close() {\n\t// a \"clean\" connection close is orchestrated as follows:\n\t//\n\t//     1. CLOSE cmd sent to nsqd\n\t//     2. CLOSE_WAIT response received from nsqd\n\t//     3. set c.closeFlag\n\t//     4. 
readLoop() exits\n\t//         a. if messages-in-flight > 0 delay close()\n\t//             i. writeLoop() continues receiving on c.msgResponseChan chan\n\t//                 x. when messages-in-flight == 0 call close()\n\t//         b. else call close() immediately\n\t//     5. c.exitChan close\n\t//         a. writeLoop() exits\n\t//             i. c.drainReady close\n\t//     6a. launch cleanup() goroutine (we're racing with intraprocess\n\t//        routed messages, see comments below)\n\t//         a. wait on c.drainReady\n\t//         b. loop and receive on c.msgResponseChan chan\n\t//            until messages-in-flight == 0\n\t//            i. ensure that readLoop has exited\n\t//     6b. launch waitForCleanup() goroutine\n\t//         b. wait on waitgroup (covers readLoop() and writeLoop()\n\t//            and cleanup goroutine)\n\t//         c. underlying TCP connection close\n\t//         d. trigger Delegate OnClose()\n\t//\n\tc.stopper.Do(func() {\n\t\tc.log(LogLevelInfo, \"beginning close\")\n\t\tclose(c.exitChan)\n\t\tc.conn.CloseRead()\n\n\t\tc.wg.Add(1)\n\t\tgo c.cleanup()\n\n\t\tgo c.waitForCleanup()\n\t})\n}\n\nfunc (c *Conn) cleanup() {\n\t<-c.drainReady\n\tticker := time.NewTicker(100 * time.Millisecond)\n\tlastWarning := time.Now()\n\t// writeLoop has exited, drain any remaining in flight messages\n\tfor {\n\t\t// we're racing with readLoop which potentially has a message\n\t\t// for handling so infinitely loop until messagesInFlight == 0\n\t\t// and readLoop has exited\n\t\tvar msgsInFlight int64\n\t\tselect {\n\t\tcase <-c.msgResponseChan:\n\t\t\tmsgsInFlight = atomic.AddInt64(&c.messagesInFlight, -1)\n\t\tcase <-ticker.C:\n\t\t\tmsgsInFlight = atomic.LoadInt64(&c.messagesInFlight)\n\t\t}\n\t\tif msgsInFlight > 0 {\n\t\t\tif time.Now().Sub(lastWarning) > time.Second {\n\t\t\t\tc.log(LogLevelWarning, \"draining... 
waiting for %d messages in flight\", msgsInFlight)\n\t\t\t\tlastWarning = time.Now()\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// until the readLoop has exited we cannot be sure that there\n\t\t// still won't be a race\n\t\tif atomic.LoadInt32(&c.readLoopRunning) == 1 {\n\t\t\tif time.Now().Sub(lastWarning) > time.Second {\n\t\t\t\tc.log(LogLevelWarning, \"draining... readLoop still running\")\n\t\t\t\tlastWarning = time.Now()\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tgoto exit\n\t}\n\nexit:\n\tticker.Stop()\n\tc.wg.Done()\n\tc.log(LogLevelInfo, \"finished draining, cleanup exiting\")\n}\n\nfunc (c *Conn) waitForCleanup() {\n\t// this blocks until readLoop and writeLoop\n\t// (and cleanup goroutine above) have exited\n\tc.wg.Wait()\n\tc.conn.CloseWrite()\n\tc.log(LogLevelInfo, \"clean close complete\")\n\tc.delegate.OnClose(c)\n}\n\nfunc (c *Conn) onMessageFinish(m *Message) {\n\tc.msgResponseChan <- &msgResponse{msg: m, cmd: Finish(m.ID), success: true}\n}\n\nfunc (c *Conn) onMessageRequeue(m *Message, delay time.Duration, backoff bool) {\n\tif delay == -1 {\n\t\t// linear delay\n\t\tdelay = c.config.DefaultRequeueDelay * time.Duration(m.Attempts)\n\t\t// bound the requeueDelay to configured max\n\t\tif delay > c.config.MaxRequeueDelay {\n\t\t\tdelay = c.config.MaxRequeueDelay\n\t\t}\n\t}\n\tc.msgResponseChan <- &msgResponse{msg: m, cmd: Requeue(m.ID, delay), success: false, backoff: backoff}\n}\n\nfunc (c *Conn) onMessageTouch(m *Message) {\n\tselect {\n\tcase c.cmdChan <- Touch(m.ID):\n\tcase <-c.exitChan:\n\t}\n}\n\nfunc (c *Conn) log(lvl LogLevel, line string, args ...interface{}) {\n\tlogger, logLvl, logFmt := c.getLogger()\n\n\tif logger == nil {\n\t\treturn\n\t}\n\n\tif logLvl > lvl {\n\t\treturn\n\t}\n\n\tlogger.Output(2, fmt.Sprintf(\"%-4s %s %s\", lvl,\n\t\tfmt.Sprintf(logFmt, c.String()),\n\t\tfmt.Sprintf(line, args...)))\n}\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/consumer.go",
    "content": "package nsq\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"math/rand\"\n\t\"net\"\n\t\"net/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n)\n\n// Handler is the message processing interface for Consumer\n//\n// Implement this interface for handlers that return whether or not message\n// processing completed successfully.\n//\n// When the return value is nil Consumer will automatically handle FINishing.\n//\n// When the returned value is non-nil Consumer will automatically handle REQueing.\ntype Handler interface {\n\tHandleMessage(message *Message) error\n}\n\n// HandlerFunc is a convenience type to avoid having to declare a struct\n// to implement the Handler interface, it can be used like this:\n//\n// \tconsumer.AddHandler(nsq.HandlerFunc(func(m *Message) error {\n// \t\t// handle the message\n// \t}))\ntype HandlerFunc func(message *Message) error\n\n// HandleMessage implements the Handler interface\nfunc (h HandlerFunc) HandleMessage(m *Message) error {\n\treturn h(m)\n}\n\n// DiscoveryFilter is an interface accepted by `SetBehaviorDelegate()`\n// for filtering the nsqds returned from discovery via nsqlookupd\ntype DiscoveryFilter interface {\n\tFilter([]string) []string\n}\n\n// FailedMessageLogger is an interface that can be implemented by handlers that wish\n// to receive a callback when a message is deemed \"failed\" (i.e. 
the number of attempts\n// exceeded the Consumer specified MaxAttemptCount)\ntype FailedMessageLogger interface {\n\tLogFailedMessage(message *Message)\n}\n\n// ConsumerStats represents a snapshot of the state of a Consumer's connections and the messages\n// it has seen\ntype ConsumerStats struct {\n\tMessagesReceived uint64\n\tMessagesFinished uint64\n\tMessagesRequeued uint64\n\tConnections      int\n}\n\nvar instCount int64\n\ntype backoffSignal int\n\nconst (\n\tbackoffFlag backoffSignal = iota\n\tcontinueFlag\n\tresumeFlag\n)\n\n// Consumer is a high-level type to consume from NSQ.\n//\n// A Consumer instance is supplied a Handler that will be executed\n// concurrently via goroutines to handle processing the stream of messages\n// consumed from the specified topic/channel. See: Handler/HandlerFunc\n// for details on implementing the interface to create handlers.\n//\n// If configured, it will poll nsqlookupd instances and handle connection (and\n// reconnection) to any discovered nsqds.\ntype Consumer struct {\n\t// 64bit atomic vars need to be first for proper alignment on 32bit platforms\n\tmessagesReceived uint64\n\tmessagesFinished uint64\n\tmessagesRequeued uint64\n\ttotalRdyCount    int64\n\tbackoffDuration  int64\n\tbackoffCounter   int32\n\tmaxInFlight      int32\n\n\tmtx sync.RWMutex\n\n\tlogger   logger\n\tlogLvl   LogLevel\n\tlogGuard sync.RWMutex\n\n\tbehaviorDelegate interface{}\n\n\tid      int64\n\ttopic   string\n\tchannel string\n\tconfig  Config\n\n\trng *rand.Rand\n\n\tneedRDYRedistributed int32\n\n\tbackoffMtx sync.RWMutex\n\n\tincomingMessages chan *Message\n\n\trdyRetryMtx    sync.RWMutex\n\trdyRetryTimers map[string]*time.Timer\n\n\tpendingConnections map[string]*Conn\n\tconnections        map[string]*Conn\n\n\tnsqdTCPAddrs []string\n\n\t// used at connection close to force a possible reconnect\n\tlookupdRecheckChan chan int\n\tlookupdHTTPAddrs   []string\n\tlookupdQueryIndex  int\n\n\twg              sync.WaitGroup\n\trunningHandlers 
int32\n\tstopFlag        int32\n\tconnectedFlag   int32\n\tstopHandler     sync.Once\n\texitHandler     sync.Once\n\n\t// read from this channel to block until consumer is cleanly stopped\n\tStopChan chan int\n\texitChan chan int\n}\n\n// NewConsumer creates a new instance of Consumer for the specified topic/channel\n//\n// The only valid way to create a Config is via NewConfig, using a struct literal will panic.\n// After Config is passed into NewConsumer the values are no longer mutable (they are copied).\nfunc NewConsumer(topic string, channel string, config *Config) (*Consumer, error) {\n\tconfig.assertInitialized()\n\n\tif err := config.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !IsValidTopicName(topic) {\n\t\treturn nil, errors.New(\"invalid topic name\")\n\t}\n\n\tif !IsValidChannelName(channel) {\n\t\treturn nil, errors.New(\"invalid channel name\")\n\t}\n\n\tr := &Consumer{\n\t\tid: atomic.AddInt64(&instCount, 1),\n\n\t\ttopic:   topic,\n\t\tchannel: channel,\n\t\tconfig:  *config,\n\n\t\tlogger:      log.New(os.Stderr, \"\", log.Flags()),\n\t\tlogLvl:      LogLevelInfo,\n\t\tmaxInFlight: int32(config.MaxInFlight),\n\n\t\tincomingMessages: make(chan *Message),\n\n\t\trdyRetryTimers:     make(map[string]*time.Timer),\n\t\tpendingConnections: make(map[string]*Conn),\n\t\tconnections:        make(map[string]*Conn),\n\n\t\tlookupdRecheckChan: make(chan int, 1),\n\n\t\trng: rand.New(rand.NewSource(time.Now().UnixNano())),\n\n\t\tStopChan: make(chan int),\n\t\texitChan: make(chan int),\n\t}\n\tr.wg.Add(1)\n\tgo r.rdyLoop()\n\treturn r, nil\n}\n\n// Stats retrieves the current connection and message statistics for a Consumer\nfunc (r *Consumer) Stats() *ConsumerStats {\n\treturn &ConsumerStats{\n\t\tMessagesReceived: atomic.LoadUint64(&r.messagesReceived),\n\t\tMessagesFinished: atomic.LoadUint64(&r.messagesFinished),\n\t\tMessagesRequeued: atomic.LoadUint64(&r.messagesRequeued),\n\t\tConnections:      len(r.conns()),\n\t}\n}\n\nfunc (r *Consumer) 
conns() []*Conn {\n\tr.mtx.RLock()\n\tconns := make([]*Conn, 0, len(r.connections))\n\tfor _, c := range r.connections {\n\t\tconns = append(conns, c)\n\t}\n\tr.mtx.RUnlock()\n\treturn conns\n}\n\n// SetLogger assigns the logger to use as well as a level\n//\n// The logger parameter is an interface that requires the following\n// method to be implemented (such as the the stdlib log.Logger):\n//\n//    Output(calldepth int, s string)\n//\nfunc (r *Consumer) SetLogger(l logger, lvl LogLevel) {\n\tr.logGuard.Lock()\n\tdefer r.logGuard.Unlock()\n\n\tr.logger = l\n\tr.logLvl = lvl\n}\n\nfunc (r *Consumer) getLogger() (logger, LogLevel) {\n\tr.logGuard.RLock()\n\tdefer r.logGuard.RUnlock()\n\n\treturn r.logger, r.logLvl\n}\n\n// SetBehaviorDelegate takes a type implementing one or more\n// of the following interfaces that modify the behavior\n// of the `Consumer`:\n//\n//    DiscoveryFilter\n//\nfunc (r *Consumer) SetBehaviorDelegate(cb interface{}) {\n\tmatched := false\n\n\tif _, ok := cb.(DiscoveryFilter); ok {\n\t\tmatched = true\n\t}\n\n\tif !matched {\n\t\tpanic(\"behavior delegate does not have any recognized methods\")\n\t}\n\n\tr.behaviorDelegate = cb\n}\n\n// perConnMaxInFlight calculates the per-connection max-in-flight count.\n//\n// This may change dynamically based on the number of connections to nsqd the Consumer\n// is responsible for.\nfunc (r *Consumer) perConnMaxInFlight() int64 {\n\tb := float64(r.getMaxInFlight())\n\ts := b / float64(len(r.conns()))\n\treturn int64(math.Min(math.Max(1, s), b))\n}\n\n// IsStarved indicates whether any connections for this consumer are blocked on processing\n// before being able to receive more messages (ie. 
RDY count of 0 and not exiting)\nfunc (r *Consumer) IsStarved() bool {\n\tfor _, conn := range r.conns() {\n\t\tthreshold := int64(float64(atomic.LoadInt64(&conn.lastRdyCount)) * 0.85)\n\t\tinFlight := atomic.LoadInt64(&conn.messagesInFlight)\n\t\tif inFlight >= threshold && inFlight > 0 && !conn.IsClosing() {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (r *Consumer) getMaxInFlight() int32 {\n\treturn atomic.LoadInt32(&r.maxInFlight)\n}\n\n// ChangeMaxInFlight sets a new maximum number of messages this comsumer instance\n// will allow in-flight, and updates all existing connections as appropriate.\n//\n// For example, ChangeMaxInFlight(0) would pause message flow\n//\n// If already connected, it updates the reader RDY state for each connection.\nfunc (r *Consumer) ChangeMaxInFlight(maxInFlight int) {\n\tif r.getMaxInFlight() == int32(maxInFlight) {\n\t\treturn\n\t}\n\n\tatomic.StoreInt32(&r.maxInFlight, int32(maxInFlight))\n\n\tfor _, c := range r.conns() {\n\t\tr.maybeUpdateRDY(c)\n\t}\n}\n\n// ConnectToNSQLookupd adds an nsqlookupd address to the list for this Consumer instance.\n//\n// If it is the first to be added, it initiates an HTTP request to discover nsqd\n// producers for the configured topic.\n//\n// A goroutine is spawned to handle continual polling.\nfunc (r *Consumer) ConnectToNSQLookupd(addr string) error {\n\tif atomic.LoadInt32(&r.stopFlag) == 1 {\n\t\treturn errors.New(\"consumer stopped\")\n\t}\n\tif atomic.LoadInt32(&r.runningHandlers) == 0 {\n\t\treturn errors.New(\"no handlers\")\n\t}\n\n\tif err := validatedLookupAddr(addr); err != nil {\n\t\treturn err\n\t}\n\n\tatomic.StoreInt32(&r.connectedFlag, 1)\n\n\tr.mtx.Lock()\n\tfor _, x := range r.lookupdHTTPAddrs {\n\t\tif x == addr {\n\t\t\tr.mtx.Unlock()\n\t\t\treturn nil\n\t\t}\n\t}\n\tr.lookupdHTTPAddrs = append(r.lookupdHTTPAddrs, addr)\n\tnumLookupd := len(r.lookupdHTTPAddrs)\n\tr.mtx.Unlock()\n\n\t// if this is the first one, kick off the go loop\n\tif numLookupd == 1 
{\n\t\tr.queryLookupd()\n\t\tr.wg.Add(1)\n\t\tgo r.lookupdLoop()\n\t}\n\n\treturn nil\n}\n\n// ConnectToNSQLookupds adds multiple nsqlookupd address to the list for this Consumer instance.\n//\n// If adding the first address it initiates an HTTP request to discover nsqd\n// producers for the configured topic.\n//\n// A goroutine is spawned to handle continual polling.\nfunc (r *Consumer) ConnectToNSQLookupds(addresses []string) error {\n\tfor _, addr := range addresses {\n\t\terr := r.ConnectToNSQLookupd(addr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc validatedLookupAddr(addr string) error {\n\tif strings.Contains(addr, \"/\") {\n\t\t_, err := url.Parse(addr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\tif !strings.Contains(addr, \":\") {\n\t\treturn errors.New(\"missing port\")\n\t}\n\treturn nil\n}\n\n// poll all known lookup servers every LookupdPollInterval\nfunc (r *Consumer) lookupdLoop() {\n\t// add some jitter so that multiple consumers discovering the same topic,\n\t// when restarted at the same time, dont all connect at once.\n\tjitter := time.Duration(int64(r.rng.Float64() *\n\t\tr.config.LookupdPollJitter * float64(r.config.LookupdPollInterval)))\n\tvar ticker *time.Ticker\n\n\tselect {\n\tcase <-time.After(jitter):\n\tcase <-r.exitChan:\n\t\tgoto exit\n\t}\n\n\tticker = time.NewTicker(r.config.LookupdPollInterval)\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tr.queryLookupd()\n\t\tcase <-r.lookupdRecheckChan:\n\t\t\tr.queryLookupd()\n\t\tcase <-r.exitChan:\n\t\t\tgoto exit\n\t\t}\n\t}\n\nexit:\n\tif ticker != nil {\n\t\tticker.Stop()\n\t}\n\tr.log(LogLevelInfo, \"exiting lookupdLoop\")\n\tr.wg.Done()\n}\n\n// return the next lookupd endpoint to query\n// keeping track of which one was last used\nfunc (r *Consumer) nextLookupdEndpoint() string {\n\tr.mtx.RLock()\n\tif r.lookupdQueryIndex >= len(r.lookupdHTTPAddrs) {\n\t\tr.lookupdQueryIndex = 0\n\t}\n\taddr := 
r.lookupdHTTPAddrs[r.lookupdQueryIndex]\n\tnum := len(r.lookupdHTTPAddrs)\n\tr.mtx.RUnlock()\n\tr.lookupdQueryIndex = (r.lookupdQueryIndex + 1) % num\n\n\turlString := addr\n\tif !strings.Contains(urlString, \"://\") {\n\t\turlString = \"http://\" + addr\n\t}\n\n\tu, err := url.Parse(urlString)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif u.Path == \"/\" || u.Path == \"\" {\n\t\tu.Path = \"/lookup\"\n\t}\n\n\tv, err := url.ParseQuery(u.RawQuery)\n\tv.Add(\"topic\", r.topic)\n\tu.RawQuery = v.Encode()\n\treturn u.String()\n}\n\ntype lookupResp struct {\n\tChannels  []string    `json:\"channels\"`\n\tProducers []*peerInfo `json:\"producers\"`\n\tTimestamp int64       `json:\"timestamp\"`\n}\n\ntype peerInfo struct {\n\tRemoteAddress    string `json:\"remote_address\"`\n\tHostname         string `json:\"hostname\"`\n\tBroadcastAddress string `json:\"broadcast_address\"`\n\tTCPPort          int    `json:\"tcp_port\"`\n\tHTTPPort         int    `json:\"http_port\"`\n\tVersion          string `json:\"version\"`\n}\n\n// make an HTTP req to one of the configured nsqlookupd instances to discover\n// which nsqd's provide the topic we are consuming.\n//\n// initiate a connection to any new producers that are identified.\nfunc (r *Consumer) queryLookupd() {\n\tendpoint := r.nextLookupdEndpoint()\n\n\tr.log(LogLevelInfo, \"querying nsqlookupd %s\", endpoint)\n\n\tvar data lookupResp\n\terr := apiRequestNegotiateV1(\"GET\", endpoint, nil, &data)\n\tif err != nil {\n\t\tr.log(LogLevelError, \"error querying nsqlookupd (%s) - %s\", endpoint, err)\n\t\treturn\n\t}\n\n\tvar nsqdAddrs []string\n\tfor _, producer := range data.Producers {\n\t\tbroadcastAddress := producer.BroadcastAddress\n\t\tport := producer.TCPPort\n\t\tjoined := net.JoinHostPort(broadcastAddress, strconv.Itoa(port))\n\t\tnsqdAddrs = append(nsqdAddrs, joined)\n\t}\n\t// apply filter\n\tif discoveryFilter, ok := r.behaviorDelegate.(DiscoveryFilter); ok {\n\t\tnsqdAddrs = 
discoveryFilter.Filter(nsqdAddrs)\n\t}\n\tfor _, addr := range nsqdAddrs {\n\t\terr = r.ConnectToNSQD(addr)\n\t\tif err != nil && err != ErrAlreadyConnected {\n\t\t\tr.log(LogLevelError, \"(%s) error connecting to nsqd - %s\", addr, err)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\n// ConnectToNSQDs takes multiple nsqd addresses to connect directly to.\n//\n// It is recommended to use ConnectToNSQLookupd so that topics are discovered\n// automatically.  This method is useful when you want to connect to local instance.\nfunc (r *Consumer) ConnectToNSQDs(addresses []string) error {\n\tfor _, addr := range addresses {\n\t\terr := r.ConnectToNSQD(addr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// ConnectToNSQD takes a nsqd address to connect directly to.\n//\n// It is recommended to use ConnectToNSQLookupd so that topics are discovered\n// automatically.  This method is useful when you want to connect to a single, local,\n// instance.\nfunc (r *Consumer) ConnectToNSQD(addr string) error {\n\tif atomic.LoadInt32(&r.stopFlag) == 1 {\n\t\treturn errors.New(\"consumer stopped\")\n\t}\n\n\tif atomic.LoadInt32(&r.runningHandlers) == 0 {\n\t\treturn errors.New(\"no handlers\")\n\t}\n\n\tatomic.StoreInt32(&r.connectedFlag, 1)\n\n\tlogger, logLvl := r.getLogger()\n\n\tconn := NewConn(addr, &r.config, &consumerConnDelegate{r})\n\tconn.SetLogger(logger, logLvl,\n\t\tfmt.Sprintf(\"%3d [%s/%s] (%%s)\", r.id, r.topic, r.channel))\n\n\tr.mtx.Lock()\n\t_, pendingOk := r.pendingConnections[addr]\n\t_, ok := r.connections[addr]\n\tif ok || pendingOk {\n\t\tr.mtx.Unlock()\n\t\treturn ErrAlreadyConnected\n\t}\n\tr.pendingConnections[addr] = conn\n\tif idx := indexOf(addr, r.nsqdTCPAddrs); idx == -1 {\n\t\tr.nsqdTCPAddrs = append(r.nsqdTCPAddrs, addr)\n\t}\n\tr.mtx.Unlock()\n\n\tr.log(LogLevelInfo, \"(%s) connecting to nsqd\", addr)\n\n\tcleanupConnection := func() {\n\t\tr.mtx.Lock()\n\t\tdelete(r.pendingConnections, 
addr)\n\t\tr.mtx.Unlock()\n\t\tconn.Close()\n\t}\n\n\tresp, err := conn.Connect()\n\tif err != nil {\n\t\tcleanupConnection()\n\t\treturn err\n\t}\n\n\tif resp != nil {\n\t\tif resp.MaxRdyCount < int64(r.getMaxInFlight()) {\n\t\t\tr.log(LogLevelWarning,\n\t\t\t\t\"(%s) max RDY count %d < consumer max in flight %d, truncation possible\",\n\t\t\t\tconn.String(), resp.MaxRdyCount, r.getMaxInFlight())\n\t\t}\n\t}\n\n\tcmd := Subscribe(r.topic, r.channel)\n\terr = conn.WriteCommand(cmd)\n\tif err != nil {\n\t\tcleanupConnection()\n\t\treturn fmt.Errorf(\"[%s] failed to subscribe to %s:%s - %s\",\n\t\t\tconn, r.topic, r.channel, err.Error())\n\t}\n\n\tr.mtx.Lock()\n\tdelete(r.pendingConnections, addr)\n\tr.connections[addr] = conn\n\tr.mtx.Unlock()\n\n\t// pre-emptive signal to existing connections to lower their RDY count\n\tfor _, c := range r.conns() {\n\t\tr.maybeUpdateRDY(c)\n\t}\n\n\treturn nil\n}\n\nfunc indexOf(n string, h []string) int {\n\tfor i, a := range h {\n\t\tif n == a {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\n// DisconnectFromNSQD closes the connection to and removes the specified\n// `nsqd` address from the list\nfunc (r *Consumer) DisconnectFromNSQD(addr string) error {\n\tr.mtx.Lock()\n\tdefer r.mtx.Unlock()\n\n\tidx := indexOf(addr, r.nsqdTCPAddrs)\n\tif idx == -1 {\n\t\treturn ErrNotConnected\n\t}\n\n\t// slice delete\n\tr.nsqdTCPAddrs = append(r.nsqdTCPAddrs[:idx], r.nsqdTCPAddrs[idx+1:]...)\n\n\tpendingConn, pendingOk := r.pendingConnections[addr]\n\tconn, ok := r.connections[addr]\n\n\tif ok {\n\t\tconn.Close()\n\t} else if pendingOk {\n\t\tpendingConn.Close()\n\t}\n\n\treturn nil\n}\n\n// DisconnectFromNSQLookupd removes the specified `nsqlookupd` address\n// from the list used for periodic discovery.\nfunc (r *Consumer) DisconnectFromNSQLookupd(addr string) error {\n\tr.mtx.Lock()\n\tdefer r.mtx.Unlock()\n\n\tidx := indexOf(addr, r.lookupdHTTPAddrs)\n\tif idx == -1 {\n\t\treturn ErrNotConnected\n\t}\n\n\tif len(r.lookupdHTTPAddrs) == 1 
{\n\t\treturn fmt.Errorf(\"cannot disconnect from only remaining nsqlookupd HTTP address %s\", addr)\n\t}\n\n\tr.lookupdHTTPAddrs = append(r.lookupdHTTPAddrs[:idx], r.lookupdHTTPAddrs[idx+1:]...)\n\n\treturn nil\n}\n\nfunc (r *Consumer) onConnMessage(c *Conn, msg *Message) {\n\tatomic.AddInt64(&r.totalRdyCount, -1)\n\tatomic.AddUint64(&r.messagesReceived, 1)\n\tr.incomingMessages <- msg\n\tr.maybeUpdateRDY(c)\n}\n\nfunc (r *Consumer) onConnMessageFinished(c *Conn, msg *Message) {\n\tatomic.AddUint64(&r.messagesFinished, 1)\n}\n\nfunc (r *Consumer) onConnMessageRequeued(c *Conn, msg *Message) {\n\tatomic.AddUint64(&r.messagesRequeued, 1)\n}\n\nfunc (r *Consumer) onConnBackoff(c *Conn) {\n\tr.startStopContinueBackoff(c, backoffFlag)\n}\n\nfunc (r *Consumer) onConnContinue(c *Conn) {\n\tr.startStopContinueBackoff(c, continueFlag)\n}\n\nfunc (r *Consumer) onConnResume(c *Conn) {\n\tr.startStopContinueBackoff(c, resumeFlag)\n}\n\nfunc (r *Consumer) onConnResponse(c *Conn, data []byte) {\n\tswitch {\n\tcase bytes.Equal(data, []byte(\"CLOSE_WAIT\")):\n\t\t// server is ready for us to close (it ack'd our StartClose)\n\t\t// we can assume we will not receive any more messages over this channel\n\t\t// (but we can still write back responses)\n\t\tr.log(LogLevelInfo, \"(%s) received CLOSE_WAIT from nsqd\", c.String())\n\t\tc.Close()\n\t}\n}\n\nfunc (r *Consumer) onConnError(c *Conn, data []byte) {}\n\nfunc (r *Consumer) onConnHeartbeat(c *Conn) {}\n\nfunc (r *Consumer) onConnIOError(c *Conn, err error) {\n\tc.Close()\n}\n\nfunc (r *Consumer) onConnClose(c *Conn) {\n\tvar hasRDYRetryTimer bool\n\n\t// remove this connections RDY count from the consumer's total\n\trdyCount := c.RDY()\n\tatomic.AddInt64(&r.totalRdyCount, -rdyCount)\n\n\tr.rdyRetryMtx.Lock()\n\tif timer, ok := r.rdyRetryTimers[c.String()]; ok {\n\t\t// stop any pending retry of an old RDY update\n\t\ttimer.Stop()\n\t\tdelete(r.rdyRetryTimers, c.String())\n\t\thasRDYRetryTimer = 
true\n\t}\n\tr.rdyRetryMtx.Unlock()\n\n\tr.mtx.Lock()\n\tdelete(r.connections, c.String())\n\tleft := len(r.connections)\n\tr.mtx.Unlock()\n\n\tr.log(LogLevelWarning, \"there are %d connections left alive\", left)\n\n\tif (hasRDYRetryTimer || rdyCount > 0) &&\n\t\t(int32(left) == r.getMaxInFlight() || r.inBackoff()) {\n\t\t// we're toggling out of (normal) redistribution cases and this conn\n\t\t// had a RDY count...\n\t\t//\n\t\t// trigger RDY redistribution to make sure this RDY is moved\n\t\t// to a new connection\n\t\tatomic.StoreInt32(&r.needRDYRedistributed, 1)\n\t}\n\n\t// we were the last one (and stopping)\n\tif atomic.LoadInt32(&r.stopFlag) == 1 {\n\t\tif left == 0 {\n\t\t\tr.stopHandlers()\n\t\t}\n\t\treturn\n\t}\n\n\tr.mtx.RLock()\n\tnumLookupd := len(r.lookupdHTTPAddrs)\n\treconnect := indexOf(c.String(), r.nsqdTCPAddrs) >= 0\n\tr.mtx.RUnlock()\n\tif numLookupd > 0 {\n\t\t// trigger a poll of the lookupd\n\t\tselect {\n\t\tcase r.lookupdRecheckChan <- 1:\n\t\tdefault:\n\t\t}\n\t} else if reconnect {\n\t\t// there are no lookupd and we still have this nsqd TCP address in our list...\n\t\t// try to reconnect after a bit\n\t\tgo func(addr string) {\n\t\t\tfor {\n\t\t\t\tr.log(LogLevelInfo, \"(%s) re-connecting in %s\", addr, r.config.LookupdPollInterval)\n\t\t\t\ttime.Sleep(r.config.LookupdPollInterval)\n\t\t\t\tif atomic.LoadInt32(&r.stopFlag) == 1 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tr.mtx.RLock()\n\t\t\t\treconnect := indexOf(addr, r.nsqdTCPAddrs) >= 0\n\t\t\t\tr.mtx.RUnlock()\n\t\t\t\tif !reconnect {\n\t\t\t\t\tr.log(LogLevelWarning, \"(%s) skipped reconnect after removal...\", addr)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr := r.ConnectToNSQD(addr)\n\t\t\t\tif err != nil && err != ErrAlreadyConnected {\n\t\t\t\t\tr.log(LogLevelError, \"(%s) error connecting to nsqd - %s\", addr, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}(c.String())\n\t}\n}\n\nfunc (r *Consumer) startStopContinueBackoff(conn *Conn, signal backoffSignal) 
{\n\t// prevent many async failures/successes from immediately resulting in\n\t// max backoff/normal rate (by ensuring that we dont continually incr/decr\n\t// the counter during a backoff period)\n\tr.backoffMtx.Lock()\n\tif r.inBackoffTimeout() {\n\t\tr.backoffMtx.Unlock()\n\t\treturn\n\t}\n\tdefer r.backoffMtx.Unlock()\n\n\t// update backoff state\n\tbackoffUpdated := false\n\tbackoffCounter := atomic.LoadInt32(&r.backoffCounter)\n\tswitch signal {\n\tcase resumeFlag:\n\t\tif backoffCounter > 0 {\n\t\t\tbackoffCounter--\n\t\t\tbackoffUpdated = true\n\t\t}\n\tcase backoffFlag:\n\t\tnextBackoff := r.config.BackoffStrategy.Calculate(int(backoffCounter) + 1)\n\t\tif nextBackoff <= r.config.MaxBackoffDuration {\n\t\t\tbackoffCounter++\n\t\t\tbackoffUpdated = true\n\t\t}\n\t}\n\tatomic.StoreInt32(&r.backoffCounter, backoffCounter)\n\n\tif r.backoffCounter == 0 && backoffUpdated {\n\t\t// exit backoff\n\t\tcount := r.perConnMaxInFlight()\n\t\tr.log(LogLevelWarning, \"exiting backoff, returning all to RDY %d\", count)\n\t\tfor _, c := range r.conns() {\n\t\t\tr.updateRDY(c, count)\n\t\t}\n\t} else if r.backoffCounter > 0 {\n\t\t// start or continue backoff\n\t\tbackoffDuration := r.config.BackoffStrategy.Calculate(int(backoffCounter))\n\n\t\tif backoffDuration > r.config.MaxBackoffDuration {\n\t\t\tbackoffDuration = r.config.MaxBackoffDuration\n\t\t}\n\n\t\tr.log(LogLevelWarning, \"backing off for %.04f seconds (backoff level %d), setting all to RDY 0\",\n\t\t\tbackoffDuration.Seconds(), backoffCounter)\n\n\t\t// send RDY 0 immediately (to *all* connections)\n\t\tfor _, c := range r.conns() {\n\t\t\tr.updateRDY(c, 0)\n\t\t}\n\n\t\tr.backoff(backoffDuration)\n\t}\n}\n\nfunc (r *Consumer) backoff(d time.Duration) {\n\tatomic.StoreInt64(&r.backoffDuration, d.Nanoseconds())\n\ttime.AfterFunc(d, r.resume)\n}\n\nfunc (r *Consumer) resume() {\n\tif atomic.LoadInt32(&r.stopFlag) == 1 {\n\t\tatomic.StoreInt64(&r.backoffDuration, 0)\n\t\treturn\n\t}\n\n\t// pick a random 
connection to test the waters\n\tconns := r.conns()\n\tif len(conns) == 0 {\n\t\tr.log(LogLevelWarning, \"no connection available to resume\")\n\t\tr.log(LogLevelWarning, \"backing off for %.04f seconds\", 1)\n\t\tr.backoff(time.Second)\n\t\treturn\n\t}\n\tidx := r.rng.Intn(len(conns))\n\tchoice := conns[idx]\n\n\tr.log(LogLevelWarning,\n\t\t\"(%s) backoff timeout expired, sending RDY 1\",\n\t\tchoice.String())\n\n\t// while in backoff only ever let 1 message at a time through\n\terr := r.updateRDY(choice, 1)\n\tif err != nil {\n\t\tr.log(LogLevelWarning, \"(%s) error resuming RDY 1 - %s\", choice.String(), err)\n\t\tr.log(LogLevelWarning, \"backing off for %.04f seconds\", 1)\n\t\tr.backoff(time.Second)\n\t\treturn\n\t}\n\n\tatomic.StoreInt64(&r.backoffDuration, 0)\n}\n\nfunc (r *Consumer) inBackoff() bool {\n\treturn atomic.LoadInt32(&r.backoffCounter) > 0\n}\n\nfunc (r *Consumer) inBackoffTimeout() bool {\n\treturn atomic.LoadInt64(&r.backoffDuration) > 0\n}\n\nfunc (r *Consumer) maybeUpdateRDY(conn *Conn) {\n\tinBackoff := r.inBackoff()\n\tinBackoffTimeout := r.inBackoffTimeout()\n\tif inBackoff || inBackoffTimeout {\n\t\tr.log(LogLevelDebug, \"(%s) skip sending RDY inBackoff:%v || inBackoffTimeout:%v\",\n\t\t\tconn, inBackoff, inBackoffTimeout)\n\t\treturn\n\t}\n\n\tremain := conn.RDY()\n\tlastRdyCount := conn.LastRDY()\n\tcount := r.perConnMaxInFlight()\n\n\t// refill when at 1, or at 25%, or if connections have changed and we're imbalanced\n\tif remain <= 1 || remain < (lastRdyCount/4) || (count > 0 && count < remain) {\n\t\tr.log(LogLevelDebug, \"(%s) sending RDY %d (%d remain from last RDY %d)\",\n\t\t\tconn, count, remain, lastRdyCount)\n\t\tr.updateRDY(conn, count)\n\t} else {\n\t\tr.log(LogLevelDebug, \"(%s) skip sending RDY %d (%d remain out of last RDY %d)\",\n\t\t\tconn, count, remain, lastRdyCount)\n\t}\n}\n\nfunc (r *Consumer) rdyLoop() {\n\tredistributeTicker := time.NewTicker(r.config.RDYRedistributeInterval)\n\n\tfor {\n\t\tselect {\n\t\tcase 
<-redistributeTicker.C:\n\t\t\tr.redistributeRDY()\n\t\tcase <-r.exitChan:\n\t\t\tgoto exit\n\t\t}\n\t}\n\nexit:\n\tredistributeTicker.Stop()\n\tr.log(LogLevelInfo, \"rdyLoop exiting\")\n\tr.wg.Done()\n}\n\nfunc (r *Consumer) updateRDY(c *Conn, count int64) error {\n\tif c.IsClosing() {\n\t\treturn ErrClosing\n\t}\n\n\t// never exceed the nsqd's configured max RDY count\n\tif count > c.MaxRDY() {\n\t\tcount = c.MaxRDY()\n\t}\n\n\t// stop any pending retry of an old RDY update\n\tr.rdyRetryMtx.Lock()\n\tif timer, ok := r.rdyRetryTimers[c.String()]; ok {\n\t\ttimer.Stop()\n\t\tdelete(r.rdyRetryTimers, c.String())\n\t}\n\tr.rdyRetryMtx.Unlock()\n\n\t// never exceed our global max in flight. truncate if possible.\n\t// this could help a new connection get partial max-in-flight\n\trdyCount := c.RDY()\n\tmaxPossibleRdy := int64(r.getMaxInFlight()) - atomic.LoadInt64(&r.totalRdyCount) + rdyCount\n\tif maxPossibleRdy > 0 && maxPossibleRdy < count {\n\t\tcount = maxPossibleRdy\n\t}\n\tif maxPossibleRdy <= 0 && count > 0 {\n\t\tif rdyCount == 0 {\n\t\t\t// we wanted to exit a zero RDY count but we couldn't send it...\n\t\t\t// in order to prevent eternal starvation we reschedule this attempt\n\t\t\t// (if any other RDY update succeeds this timer will be stopped)\n\t\t\tr.rdyRetryMtx.Lock()\n\t\t\tr.rdyRetryTimers[c.String()] = time.AfterFunc(5*time.Second,\n\t\t\t\tfunc() {\n\t\t\t\t\tr.updateRDY(c, count)\n\t\t\t\t})\n\t\t\tr.rdyRetryMtx.Unlock()\n\t\t}\n\t\treturn ErrOverMaxInFlight\n\t}\n\n\treturn r.sendRDY(c, count)\n}\n\nfunc (r *Consumer) sendRDY(c *Conn, count int64) error {\n\tif count == 0 && c.LastRDY() == 0 {\n\t\t// no need to send. 
It's already that RDY count\n\t\treturn nil\n\t}\n\n\tatomic.AddInt64(&r.totalRdyCount, -c.RDY()+count)\n\tc.SetRDY(count)\n\terr := c.WriteCommand(Ready(int(count)))\n\tif err != nil {\n\t\tr.log(LogLevelError, \"(%s) error sending RDY %d - %s\", c.String(), count, err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (r *Consumer) redistributeRDY() {\n\tif r.inBackoffTimeout() {\n\t\treturn\n\t}\n\n\t// if an external heuristic set needRDYRedistributed we want to wait\n\t// until we can actually redistribute to proceed\n\tconns := r.conns()\n\tif len(conns) == 0 {\n\t\treturn\n\t}\n\n\tmaxInFlight := r.getMaxInFlight()\n\tif len(conns) > int(maxInFlight) {\n\t\tr.log(LogLevelDebug, \"redistributing RDY state (%d conns > %d max_in_flight)\",\n\t\t\tlen(conns), maxInFlight)\n\t\tatomic.StoreInt32(&r.needRDYRedistributed, 1)\n\t}\n\n\tif r.inBackoff() && len(conns) > 1 {\n\t\tr.log(LogLevelDebug, \"redistributing RDY state (in backoff and %d conns > 1)\", len(conns))\n\t\tatomic.StoreInt32(&r.needRDYRedistributed, 1)\n\t}\n\n\tif !atomic.CompareAndSwapInt32(&r.needRDYRedistributed, 1, 0) {\n\t\treturn\n\t}\n\n\tpossibleConns := make([]*Conn, 0, len(conns))\n\tfor _, c := range conns {\n\t\tlastMsgDuration := time.Now().Sub(c.LastMessageTime())\n\t\trdyCount := c.RDY()\n\t\tr.log(LogLevelDebug, \"(%s) rdy: %d (last message received %s)\",\n\t\t\tc.String(), rdyCount, lastMsgDuration)\n\t\tif rdyCount > 0 && lastMsgDuration > r.config.LowRdyIdleTimeout {\n\t\t\tr.log(LogLevelDebug, \"(%s) idle connection, giving up RDY\", c.String())\n\t\t\tr.updateRDY(c, 0)\n\t\t}\n\t\tpossibleConns = append(possibleConns, c)\n\t}\n\n\tavailableMaxInFlight := int64(maxInFlight) - atomic.LoadInt64(&r.totalRdyCount)\n\tif r.inBackoff() {\n\t\tavailableMaxInFlight = 1 - atomic.LoadInt64(&r.totalRdyCount)\n\t}\n\n\tfor len(possibleConns) > 0 && availableMaxInFlight > 0 {\n\t\tavailableMaxInFlight--\n\t\ti := r.rng.Int() % len(possibleConns)\n\t\tc := possibleConns[i]\n\t\t// 
delete\n\t\tpossibleConns = append(possibleConns[:i], possibleConns[i+1:]...)\n\t\tr.log(LogLevelDebug, \"(%s) redistributing RDY\", c.String())\n\t\tr.updateRDY(c, 1)\n\t}\n}\n\n// Stop will initiate a graceful stop of the Consumer (permanent)\n//\n// NOTE: receive on StopChan to block until this process completes\nfunc (r *Consumer) Stop() {\n\tif !atomic.CompareAndSwapInt32(&r.stopFlag, 0, 1) {\n\t\treturn\n\t}\n\n\tr.log(LogLevelInfo, \"stopping...\")\n\n\tif len(r.conns()) == 0 {\n\t\tr.stopHandlers()\n\t} else {\n\t\tfor _, c := range r.conns() {\n\t\t\terr := c.WriteCommand(StartClose())\n\t\t\tif err != nil {\n\t\t\t\tr.log(LogLevelError, \"(%s) error sending CLS - %s\", c.String(), err)\n\t\t\t}\n\t\t}\n\n\t\ttime.AfterFunc(time.Second*30, func() {\n\t\t\t// if we've waited this long handlers are blocked on processing messages\n\t\t\t// so we can't just stopHandlers (if any adtl. messages were pending processing\n\t\t\t// we would cause a panic on channel close)\n\t\t\t//\n\t\t\t// instead, we just bypass handler closing and skip to the final exit\n\t\t\tr.exit()\n\t\t})\n\t}\n}\n\nfunc (r *Consumer) stopHandlers() {\n\tr.stopHandler.Do(func() {\n\t\tr.log(LogLevelInfo, \"stopping handlers\")\n\t\tclose(r.incomingMessages)\n\t})\n}\n\n// AddHandler sets the Handler for messages received by this Consumer. This can be called\n// multiple times to add additional handlers. Handler will have a 1:1 ratio to message handling goroutines.\n//\n// This panics if called after connecting to NSQD or NSQ Lookupd\n//\n// (see Handler or HandlerFunc for details on implementing this interface)\nfunc (r *Consumer) AddHandler(handler Handler) {\n\tr.AddConcurrentHandlers(handler, 1)\n}\n\n// AddConcurrentHandlers sets the Handler for messages received by this Consumer.  
It\n// takes a second argument which indicates the number of goroutines to spawn for\n// message handling.\n//\n// This panics if called after connecting to NSQD or NSQ Lookupd\n//\n// (see Handler or HandlerFunc for details on implementing this interface)\nfunc (r *Consumer) AddConcurrentHandlers(handler Handler, concurrency int) {\n\tif atomic.LoadInt32(&r.connectedFlag) == 1 {\n\t\tpanic(\"already connected\")\n\t}\n\n\tatomic.AddInt32(&r.runningHandlers, int32(concurrency))\n\tfor i := 0; i < concurrency; i++ {\n\t\tgo r.handlerLoop(handler)\n\t}\n}\n\nfunc (r *Consumer) handlerLoop(handler Handler) {\n\tr.log(LogLevelDebug, \"starting Handler\")\n\n\tfor {\n\t\tmessage, ok := <-r.incomingMessages\n\t\tif !ok {\n\t\t\tgoto exit\n\t\t}\n\n\t\tif r.shouldFailMessage(message, handler) {\n\t\t\tmessage.Finish()\n\t\t\tcontinue\n\t\t}\n\n\t\terr := handler.HandleMessage(message)\n\t\tif err != nil {\n\t\t\tr.log(LogLevelError, \"Handler returned error (%s) for msg %s\", err, message.ID)\n\t\t\tif !message.IsAutoResponseDisabled() {\n\t\t\t\tmessage.Requeue(-1)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif !message.IsAutoResponseDisabled() {\n\t\t\tmessage.Finish()\n\t\t}\n\t}\n\nexit:\n\tr.log(LogLevelDebug, \"stopping Handler\")\n\tif atomic.AddInt32(&r.runningHandlers, -1) == 0 {\n\t\tr.exit()\n\t}\n}\n\nfunc (r *Consumer) shouldFailMessage(message *Message, handler interface{}) bool {\n\t// message passed the max number of attempts\n\tif r.config.MaxAttempts > 0 && message.Attempts > r.config.MaxAttempts {\n\t\tr.log(LogLevelWarning, \"msg %s attempted %d times, giving up\",\n\t\t\tmessage.ID, message.Attempts)\n\n\t\tlogger, ok := handler.(FailedMessageLogger)\n\t\tif ok {\n\t\t\tlogger.LogFailedMessage(message)\n\t\t}\n\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (r *Consumer) exit() {\n\tr.exitHandler.Do(func() {\n\t\tclose(r.exitChan)\n\t\tr.wg.Wait()\n\t\tclose(r.StopChan)\n\t})\n}\n\nfunc (r *Consumer) log(lvl LogLevel, line string, args ...interface{}) 
{\n\tlogger, logLvl := r.getLogger()\n\n\tif logger == nil {\n\t\treturn\n\t}\n\n\tif logLvl > lvl {\n\t\treturn\n\t}\n\n\tlogger.Output(2, fmt.Sprintf(\"%-4s %3d [%s/%s] %s\",\n\t\tlvl, r.id, r.topic, r.channel,\n\t\tfmt.Sprintf(line, args...)))\n}\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/consumer_test.go",
    "content": "package nsq\n\nimport (\n\t\"bytes\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\ntype MyTestHandler struct {\n\tt                *testing.T\n\tq                *Consumer\n\tmessagesSent     int\n\tmessagesReceived int\n\tmessagesFailed   int\n}\n\nvar nullLogger = log.New(ioutil.Discard, \"\", log.LstdFlags)\n\nfunc (h *MyTestHandler) LogFailedMessage(message *Message) {\n\th.messagesFailed++\n\th.q.Stop()\n}\n\nfunc (h *MyTestHandler) HandleMessage(message *Message) error {\n\tif string(message.Body) == \"TOBEFAILED\" {\n\t\th.messagesReceived++\n\t\treturn errors.New(\"fail this message\")\n\t}\n\n\tdata := struct {\n\t\tMsg string\n\t}{}\n\n\terr := json.Unmarshal(message.Body, &data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmsg := data.Msg\n\tif msg != \"single\" && msg != \"double\" {\n\t\th.t.Error(\"message 'action' was not correct: \", msg, data)\n\t}\n\th.messagesReceived++\n\treturn nil\n}\n\nfunc SendMessage(t *testing.T, port int, topic string, method string, body []byte) {\n\thttpclient := &http.Client{}\n\tendpoint := fmt.Sprintf(\"http://127.0.0.1:%d/%s?topic=%s\", port, method, topic)\n\treq, err := http.NewRequest(\"POST\", endpoint, bytes.NewBuffer(body))\n\tresp, err := httpclient.Do(req)\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t\treturn\n\t}\n\tresp.Body.Close()\n}\n\nfunc TestConsumer(t *testing.T) {\n\tconsumerTest(t, nil)\n}\n\nfunc TestConsumerTLS(t *testing.T) {\n\tconsumerTest(t, func(c *Config) {\n\t\tc.TlsV1 = true\n\t\tc.TlsConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: true,\n\t\t}\n\t})\n}\n\nfunc TestConsumerDeflate(t *testing.T) {\n\tconsumerTest(t, func(c *Config) {\n\t\tc.Deflate = true\n\t})\n}\n\nfunc TestConsumerSnappy(t *testing.T) {\n\tconsumerTest(t, func(c *Config) {\n\t\tc.Snappy = true\n\t})\n}\n\nfunc TestConsumerTLSDeflate(t *testing.T) 
{\n\tconsumerTest(t, func(c *Config) {\n\t\tc.TlsV1 = true\n\t\tc.TlsConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: true,\n\t\t}\n\t\tc.Deflate = true\n\t})\n}\n\nfunc TestConsumerTLSSnappy(t *testing.T) {\n\tconsumerTest(t, func(c *Config) {\n\t\tc.TlsV1 = true\n\t\tc.TlsConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: true,\n\t\t}\n\t\tc.Snappy = true\n\t})\n}\n\nfunc TestConsumerTLSClientCert(t *testing.T) {\n\tenvDl := os.Getenv(\"NSQ_DOWNLOAD\")\n\tif strings.HasPrefix(envDl, \"nsq-0.2.24\") || strings.HasPrefix(envDl, \"nsq-0.2.27\") {\n\t\tt.Log(\"skipping due to older nsqd\")\n\t\treturn\n\t}\n\tcert, _ := tls.LoadX509KeyPair(\"./test/client.pem\", \"./test/client.key\")\n\tconsumerTest(t, func(c *Config) {\n\t\tc.TlsV1 = true\n\t\tc.TlsConfig = &tls.Config{\n\t\t\tCertificates:       []tls.Certificate{cert},\n\t\t\tInsecureSkipVerify: true,\n\t\t}\n\t})\n}\n\nfunc TestConsumerTLSClientCertViaSet(t *testing.T) {\n\tenvDl := os.Getenv(\"NSQ_DOWNLOAD\")\n\tif strings.HasPrefix(envDl, \"nsq-0.2.24\") || strings.HasPrefix(envDl, \"nsq-0.2.27\") {\n\t\tt.Log(\"skipping due to older nsqd\")\n\t\treturn\n\t}\n\tconsumerTest(t, func(c *Config) {\n\t\tc.Set(\"tls_v1\", true)\n\t\tc.Set(\"tls_cert\", \"./test/client.pem\")\n\t\tc.Set(\"tls_key\", \"./test/client.key\")\n\t\tc.Set(\"tls_insecure_skip_verify\", true)\n\t})\n}\n\nfunc consumerTest(t *testing.T, cb func(c *Config)) {\n\tconfig := NewConfig()\n\tladdr := \"127.0.0.1\"\n\t// so that the test can simulate binding consumer to specified address\n\tconfig.LocalAddr, _ = net.ResolveTCPAddr(\"tcp\", laddr+\":0\")\n\t// so that the test can simulate reaching max requeues and a call to LogFailedMessage\n\tconfig.DefaultRequeueDelay = 0\n\t// so that the test wont timeout from backing off\n\tconfig.MaxBackoffDuration = time.Millisecond * 50\n\tif cb != nil {\n\t\tcb(config)\n\t}\n\ttopicName := \"rdr_test\"\n\tif config.Deflate {\n\t\ttopicName = topicName + \"_deflate\"\n\t} else if config.Snappy 
{\n\t\ttopicName = topicName + \"_snappy\"\n\t}\n\tif config.TlsV1 {\n\t\ttopicName = topicName + \"_tls\"\n\t}\n\ttopicName = topicName + strconv.Itoa(int(time.Now().Unix()))\n\tq, _ := NewConsumer(topicName, \"ch\", config)\n\t// q.SetLogger(nullLogger, LogLevelInfo)\n\n\th := &MyTestHandler{\n\t\tt: t,\n\t\tq: q,\n\t}\n\tq.AddHandler(h)\n\n\tSendMessage(t, 4151, topicName, \"put\", []byte(`{\"msg\":\"single\"}`))\n\tSendMessage(t, 4151, topicName, \"mput\", []byte(\"{\\\"msg\\\":\\\"double\\\"}\\n{\\\"msg\\\":\\\"double\\\"}\"))\n\tSendMessage(t, 4151, topicName, \"put\", []byte(\"TOBEFAILED\"))\n\th.messagesSent = 4\n\n\taddr := \"127.0.0.1:4150\"\n\terr := q.ConnectToNSQD(addr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tstats := q.Stats()\n\tif stats.Connections == 0 {\n\t\tt.Fatal(\"stats report 0 connections (should be > 0)\")\n\t}\n\n\terr = q.ConnectToNSQD(addr)\n\tif err == nil {\n\t\tt.Fatal(\"should not be able to connect to the same NSQ twice\")\n\t}\n\n\tconn := q.conns()[0]\n\tif !strings.HasPrefix(conn.conn.LocalAddr().String(), laddr) {\n\t\tt.Fatal(\"connection should be bound to the specified address:\", conn.conn.LocalAddr())\n\t}\n\n\terr = q.DisconnectFromNSQD(\"1.2.3.4:4150\")\n\tif err == nil {\n\t\tt.Fatal(\"should not be able to disconnect from an unknown nsqd\")\n\t}\n\n\terr = q.ConnectToNSQD(\"1.2.3.4:4150\")\n\tif err == nil {\n\t\tt.Fatal(\"should not be able to connect to non-existent nsqd\")\n\t}\n\n\terr = q.DisconnectFromNSQD(\"1.2.3.4:4150\")\n\tif err != nil {\n\t\tt.Fatal(\"should be able to disconnect from an nsqd - \" + err.Error())\n\t}\n\n\t<-q.StopChan\n\n\tstats = q.Stats()\n\tif stats.Connections != 0 {\n\t\tt.Fatalf(\"stats report %d active connections (should be 0)\", stats.Connections)\n\t}\n\n\tstats = q.Stats()\n\tif stats.MessagesReceived != uint64(h.messagesReceived+h.messagesFailed) {\n\t\tt.Fatalf(\"stats report %d messages received (should be 
%d)\",\n\t\t\tstats.MessagesReceived,\n\t\t\th.messagesReceived+h.messagesFailed)\n\t}\n\n\tif h.messagesReceived != 8 || h.messagesSent != 4 {\n\t\tt.Fatalf(\"end of test. should have handled a diff number of messages (got %d, sent %d)\", h.messagesReceived, h.messagesSent)\n\t}\n\tif h.messagesFailed != 1 {\n\t\tt.Fatal(\"failed message not done\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/delegates.go",
    "content": "package nsq\n\nimport \"time\"\n\ntype logger interface {\n\tOutput(calldepth int, s string) error\n}\n\n// LogLevel specifies the severity of a given log message\ntype LogLevel int\n\n// Log levels\nconst (\n\tLogLevelDebug LogLevel = iota\n\tLogLevelInfo\n\tLogLevelWarning\n\tLogLevelError\n)\n\n// String returns the string form for a given LogLevel\nfunc (lvl LogLevel) String() string {\n\tswitch lvl {\n\tcase LogLevelInfo:\n\t\treturn \"INF\"\n\tcase LogLevelWarning:\n\t\treturn \"WRN\"\n\tcase LogLevelError:\n\t\treturn \"ERR\"\n\t}\n\treturn \"DBG\"\n}\n\n// MessageDelegate is an interface of methods that are used as\n// callbacks in Message\ntype MessageDelegate interface {\n\t// OnFinish is called when the Finish() method\n\t// is triggered on the Message\n\tOnFinish(*Message)\n\n\t// OnRequeue is called when the Requeue() method\n\t// is triggered on the Message\n\tOnRequeue(m *Message, delay time.Duration, backoff bool)\n\n\t// OnTouch is called when the Touch() method\n\t// is triggered on the Message\n\tOnTouch(*Message)\n}\n\ntype connMessageDelegate struct {\n\tc *Conn\n}\n\nfunc (d *connMessageDelegate) OnFinish(m *Message) { d.c.onMessageFinish(m) }\nfunc (d *connMessageDelegate) OnRequeue(m *Message, t time.Duration, b bool) {\n\td.c.onMessageRequeue(m, t, b)\n}\nfunc (d *connMessageDelegate) OnTouch(m *Message) { d.c.onMessageTouch(m) }\n\n// ConnDelegate is an interface of methods that are used as\n// callbacks in Conn\ntype ConnDelegate interface {\n\t// OnResponse is called when the connection\n\t// receives a FrameTypeResponse from nsqd\n\tOnResponse(*Conn, []byte)\n\n\t// OnError is called when the connection\n\t// receives a FrameTypeError from nsqd\n\tOnError(*Conn, []byte)\n\n\t// OnMessage is called when the connection\n\t// receives a FrameTypeMessage from nsqd\n\tOnMessage(*Conn, *Message)\n\n\t// OnMessageFinished is called when the connection\n\t// handles a FIN command from a message 
handler\n\tOnMessageFinished(*Conn, *Message)\n\n\t// OnMessageRequeued is called when the connection\n\t// handles a REQ command from a message handler\n\tOnMessageRequeued(*Conn, *Message)\n\n\t// OnBackoff is called when the connection triggers a backoff state\n\tOnBackoff(*Conn)\n\n\t// OnContinue is called when the connection finishes a message without adjusting backoff state\n\tOnContinue(*Conn)\n\n\t// OnResume is called when the connection triggers a resume state\n\tOnResume(*Conn)\n\n\t// OnIOError is called when the connection experiences\n\t// a low-level TCP transport error\n\tOnIOError(*Conn, error)\n\n\t// OnHeartbeat is called when the connection\n\t// receives a heartbeat from nsqd\n\tOnHeartbeat(*Conn)\n\n\t// OnClose is called when the connection\n\t// closes, after all cleanup\n\tOnClose(*Conn)\n}\n\n// keeps the exported Consumer struct clean of the exported methods\n// required to implement the ConnDelegate interface\ntype consumerConnDelegate struct {\n\tr *Consumer\n}\n\nfunc (d *consumerConnDelegate) OnResponse(c *Conn, data []byte)       { d.r.onConnResponse(c, data) }\nfunc (d *consumerConnDelegate) OnError(c *Conn, data []byte)          { d.r.onConnError(c, data) }\nfunc (d *consumerConnDelegate) OnMessage(c *Conn, m *Message)         { d.r.onConnMessage(c, m) }\nfunc (d *consumerConnDelegate) OnMessageFinished(c *Conn, m *Message) { d.r.onConnMessageFinished(c, m) }\nfunc (d *consumerConnDelegate) OnMessageRequeued(c *Conn, m *Message) { d.r.onConnMessageRequeued(c, m) }\nfunc (d *consumerConnDelegate) OnBackoff(c *Conn)                     { d.r.onConnBackoff(c) }\nfunc (d *consumerConnDelegate) OnContinue(c *Conn)                    { d.r.onConnContinue(c) }\nfunc (d *consumerConnDelegate) OnResume(c *Conn)                      { d.r.onConnResume(c) }\nfunc (d *consumerConnDelegate) OnIOError(c *Conn, err error)          { d.r.onConnIOError(c, err) }\nfunc (d *consumerConnDelegate) OnHeartbeat(c *Conn)                   { 
d.r.onConnHeartbeat(c) }\nfunc (d *consumerConnDelegate) OnClose(c *Conn)                       { d.r.onConnClose(c) }\n\n// keeps the exported Producer struct clean of the exported methods\n// required to implement the ConnDelegate interface\ntype producerConnDelegate struct {\n\tw *Producer\n}\n\nfunc (d *producerConnDelegate) OnResponse(c *Conn, data []byte)       { d.w.onConnResponse(c, data) }\nfunc (d *producerConnDelegate) OnError(c *Conn, data []byte)          { d.w.onConnError(c, data) }\nfunc (d *producerConnDelegate) OnMessage(c *Conn, m *Message)         {}\nfunc (d *producerConnDelegate) OnMessageFinished(c *Conn, m *Message) {}\nfunc (d *producerConnDelegate) OnMessageRequeued(c *Conn, m *Message) {}\nfunc (d *producerConnDelegate) OnBackoff(c *Conn)                     {}\nfunc (d *producerConnDelegate) OnContinue(c *Conn)                    {}\nfunc (d *producerConnDelegate) OnResume(c *Conn)                      {}\nfunc (d *producerConnDelegate) OnIOError(c *Conn, err error)          { d.w.onConnIOError(c, err) }\nfunc (d *producerConnDelegate) OnHeartbeat(c *Conn)                   { d.w.onConnHeartbeat(c) }\nfunc (d *producerConnDelegate) OnClose(c *Conn)                       { d.w.onConnClose(c) }\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/errors.go",
    "content": "package nsq\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\n// ErrNotConnected is returned when a publish command is made\n// against a Producer that is not connected\nvar ErrNotConnected = errors.New(\"not connected\")\n\n// ErrStopped is returned when a publish command is\n// made against a Producer that has been stopped\nvar ErrStopped = errors.New(\"stopped\")\n\n// ErrClosing is returned when a connection is closing\nvar ErrClosing = errors.New(\"closing\")\n\n// ErrAlreadyConnected is returned from ConnectToNSQD when already connected\nvar ErrAlreadyConnected = errors.New(\"already connected\")\n\n// ErrOverMaxInFlight is returned from Consumer if over max-in-flight\nvar ErrOverMaxInFlight = errors.New(\"over configure max-inflight\")\n\n// ErrIdentify is returned from Conn as part of the IDENTIFY handshake\ntype ErrIdentify struct {\n\tReason string\n}\n\n// Error returns a stringified error\nfunc (e ErrIdentify) Error() string {\n\treturn fmt.Sprintf(\"failed to IDENTIFY - %s\", e.Reason)\n}\n\n// ErrProtocol is returned from Producer when encountering\n// an NSQ protocol level error\ntype ErrProtocol struct {\n\tReason string\n}\n\n// Error returns a stringified error\nfunc (e ErrProtocol) Error() string {\n\treturn e.Reason\n}\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/message.go",
    "content": "package nsq\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"sync/atomic\"\n\t\"time\"\n)\n\n// The number of bytes for a Message.ID\nconst MsgIDLength = 16\n\n// MessageID is the ASCII encoded hexadecimal message ID\ntype MessageID [MsgIDLength]byte\n\n// Message is the fundamental data type containing\n// the id, body, and metadata\ntype Message struct {\n\tID        MessageID\n\tBody      []byte\n\tTimestamp int64\n\tAttempts  uint16\n\n\tNSQDAddress string\n\n\tDelegate MessageDelegate\n\n\tautoResponseDisabled int32\n\tresponded            int32\n}\n\n// NewMessage creates a Message, initializes some metadata,\n// and returns a pointer\nfunc NewMessage(id MessageID, body []byte) *Message {\n\treturn &Message{\n\t\tID:        id,\n\t\tBody:      body,\n\t\tTimestamp: time.Now().UnixNano(),\n\t}\n}\n\n// DisableAutoResponse disables the automatic response that\n// would normally be sent when a handler.HandleMessage\n// returns (FIN/REQ based on the error value returned).\n//\n// This is useful if you want to batch, buffer, or asynchronously\n// respond to messages.\nfunc (m *Message) DisableAutoResponse() {\n\tatomic.StoreInt32(&m.autoResponseDisabled, 1)\n}\n\n// IsAutoResponseDisabled indicates whether or not this message\n// will be responded to automatically\nfunc (m *Message) IsAutoResponseDisabled() bool {\n\treturn atomic.LoadInt32(&m.autoResponseDisabled) == 1\n}\n\n// HasResponded indicates whether or not this message has been responded to\nfunc (m *Message) HasResponded() bool {\n\treturn atomic.LoadInt32(&m.responded) == 1\n}\n\n// Finish sends a FIN command to the nsqd which\n// sent this message\nfunc (m *Message) Finish() {\n\tif !atomic.CompareAndSwapInt32(&m.responded, 0, 1) {\n\t\treturn\n\t}\n\tm.Delegate.OnFinish(m)\n}\n\n// Touch sends a TOUCH command to the nsqd which\n// sent this message\nfunc (m *Message) Touch() {\n\tif m.HasResponded() {\n\t\treturn\n\t}\n\tm.Delegate.OnTouch(m)\n}\n\n// 
Requeue sends a REQ command to the nsqd which\n// sent this message, using the supplied delay.\n//\n// A delay of -1 will automatically calculate\n// based on the number of attempts and the\n// configured default_requeue_delay\nfunc (m *Message) Requeue(delay time.Duration) {\n\tm.doRequeue(delay, true)\n}\n\n// RequeueWithoutBackoff sends a REQ command to the nsqd which\n// sent this message, using the supplied delay.\n//\n// Notably, using this method to respond does not trigger a backoff\n// event on the configured Delegate.\nfunc (m *Message) RequeueWithoutBackoff(delay time.Duration) {\n\tm.doRequeue(delay, false)\n}\n\nfunc (m *Message) doRequeue(delay time.Duration, backoff bool) {\n\tif !atomic.CompareAndSwapInt32(&m.responded, 0, 1) {\n\t\treturn\n\t}\n\tm.Delegate.OnRequeue(m, delay, backoff)\n}\n\n// WriteTo implements the WriterTo interface and serializes\n// the message into the supplied producer.\n//\n// It is suggested that the target Writer is buffered to\n// avoid performing many system calls.\nfunc (m *Message) WriteTo(w io.Writer) (int64, error) {\n\tvar buf [10]byte\n\tvar total int64\n\n\tbinary.BigEndian.PutUint64(buf[:8], uint64(m.Timestamp))\n\tbinary.BigEndian.PutUint16(buf[8:10], uint16(m.Attempts))\n\n\tn, err := w.Write(buf[:])\n\ttotal += int64(n)\n\tif err != nil {\n\t\treturn total, err\n\t}\n\n\tn, err = w.Write(m.ID[:])\n\ttotal += int64(n)\n\tif err != nil {\n\t\treturn total, err\n\t}\n\n\tn, err = w.Write(m.Body)\n\ttotal += int64(n)\n\tif err != nil {\n\t\treturn total, err\n\t}\n\n\treturn total, nil\n}\n\n// DecodeMessage deseralizes data (as []byte) and creates a new Message\nfunc DecodeMessage(b []byte) (*Message, error) {\n\tvar msg Message\n\n\tmsg.Timestamp = int64(binary.BigEndian.Uint64(b[:8]))\n\tmsg.Attempts = binary.BigEndian.Uint16(b[8:10])\n\n\tbuf := bytes.NewBuffer(b[10:])\n\n\t_, err := io.ReadFull(buf, msg.ID[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmsg.Body, err = ioutil.ReadAll(buf)\n\tif err != 
nil {\n\t\treturn nil, err\n\t}\n\n\treturn &msg, nil\n}\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/mock_test.go",
    "content": "package nsq\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n)\n\ntype tbLog interface {\n\tLog(...interface{})\n}\n\ntype testLogger struct {\n\ttbLog\n}\n\nfunc (tl *testLogger) Output(maxdepth int, s string) error {\n\ttl.Log(s)\n\treturn nil\n}\n\nfunc newTestLogger(tbl tbLog) logger {\n\treturn &testLogger{tbl}\n}\n\ntype instruction struct {\n\tdelay     time.Duration\n\tframeType int32\n\tbody      []byte\n}\n\ntype mockNSQD struct {\n\tscript      []instruction\n\tgot         [][]byte\n\ttcpAddr     *net.TCPAddr\n\ttcpListener net.Listener\n\texitChan    chan int\n}\n\nfunc newMockNSQD(script []instruction, addr string) *mockNSQD {\n\tn := &mockNSQD{\n\t\tscript:   script,\n\t\texitChan: make(chan int),\n\t}\n\n\ttcpListener, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\tlog.Fatalf(\"FATAL: listen (%s) failed - %s\", n.tcpAddr.String(), err)\n\t}\n\tn.tcpListener = tcpListener\n\tn.tcpAddr = tcpListener.Addr().(*net.TCPAddr)\n\n\tgo n.listen()\n\n\treturn n\n}\n\nfunc (n *mockNSQD) listen() {\n\tlog.Printf(\"TCP: listening on %s\", n.tcpListener.Addr().String())\n\n\tfor {\n\t\tconn, err := n.tcpListener.Accept()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tgo n.handle(conn)\n\t}\n\n\tlog.Printf(\"TCP: closing %s\", n.tcpListener.Addr().String())\n\tclose(n.exitChan)\n}\n\nfunc (n *mockNSQD) handle(conn net.Conn) {\n\tvar idx int\n\n\tlog.Printf(\"TCP: new client(%s)\", conn.RemoteAddr())\n\n\tbuf := make([]byte, 4)\n\t_, err := io.ReadFull(conn, buf)\n\tif err != nil {\n\t\tlog.Fatalf(\"ERROR: failed to read protocol version - %s\", err)\n\t}\n\n\treadChan := make(chan []byte)\n\treadDoneChan := make(chan int)\n\tscriptTime := time.After(n.script[0].delay)\n\trdr := bufio.NewReader(conn)\n\n\tgo func() {\n\t\tfor {\n\t\t\tline, err := rdr.ReadBytes('\\n')\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// trim 
the '\\n'\n\t\t\tline = line[:len(line)-1]\n\t\t\treadChan <- line\n\t\t\t<-readDoneChan\n\t\t}\n\t}()\n\n\tvar rdyCount int\n\tfor idx < len(n.script) {\n\t\tselect {\n\t\tcase line := <-readChan:\n\t\t\tlog.Printf(\"mock: %s\", line)\n\t\t\tn.got = append(n.got, line)\n\t\t\tparams := bytes.Split(line, []byte(\" \"))\n\t\t\tswitch {\n\t\t\tcase bytes.Equal(params[0], []byte(\"IDENTIFY\")):\n\t\t\t\tl := make([]byte, 4)\n\t\t\t\t_, err := io.ReadFull(rdr, l)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(err.Error())\n\t\t\t\t\tgoto exit\n\t\t\t\t}\n\t\t\t\tsize := int32(binary.BigEndian.Uint32(l))\n\t\t\t\tb := make([]byte, size)\n\t\t\t\t_, err = io.ReadFull(rdr, b)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(err.Error())\n\t\t\t\t\tgoto exit\n\t\t\t\t}\n\t\t\t\tlog.Printf(\"%s\", b)\n\t\t\tcase bytes.Equal(params[0], []byte(\"RDY\")):\n\t\t\t\trdy, _ := strconv.Atoi(string(params[1]))\n\t\t\t\trdyCount = rdy\n\t\t\tcase bytes.Equal(params[0], []byte(\"FIN\")):\n\t\t\tcase bytes.Equal(params[0], []byte(\"REQ\")):\n\t\t\t}\n\t\t\treadDoneChan <- 1\n\t\tcase <-scriptTime:\n\t\t\tinst := n.script[idx]\n\t\t\tif bytes.Equal(inst.body, []byte(\"exit\")) {\n\t\t\t\tgoto exit\n\t\t\t}\n\t\t\tif inst.frameType == FrameTypeMessage {\n\t\t\t\tif rdyCount == 0 {\n\t\t\t\t\tlog.Printf(\"!!! 
RDY == 0\")\n\t\t\t\t\tscriptTime = time.After(n.script[idx+1].delay)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\trdyCount--\n\t\t\t}\n\t\t\t_, err := conn.Write(framedResponse(inst.frameType, inst.body))\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(err.Error())\n\t\t\t\tgoto exit\n\t\t\t}\n\t\t\tscriptTime = time.After(n.script[idx+1].delay)\n\t\t\tidx++\n\t\t}\n\t}\n\nexit:\n\tn.tcpListener.Close()\n\tconn.Close()\n}\n\nfunc framedResponse(frameType int32, data []byte) []byte {\n\tvar w bytes.Buffer\n\n\tbeBuf := make([]byte, 4)\n\tsize := uint32(len(data)) + 4\n\n\tbinary.BigEndian.PutUint32(beBuf, size)\n\t_, err := w.Write(beBuf)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tbinary.BigEndian.PutUint32(beBuf, uint32(frameType))\n\t_, err = w.Write(beBuf)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\t_, err = w.Write(data)\n\treturn w.Bytes()\n}\n\ntype testHandler struct{}\n\nfunc (h *testHandler) HandleMessage(message *Message) error {\n\tswitch string(message.Body) {\n\tcase \"requeue\":\n\t\tmessage.Requeue(-1)\n\t\treturn nil\n\tcase \"requeue_no_backoff_1\":\n\t\tif message.Attempts > 1 {\n\t\t\treturn nil\n\t\t}\n\t\tmessage.RequeueWithoutBackoff(-1)\n\t\treturn nil\n\tcase \"bad\":\n\t\treturn errors.New(\"bad\")\n\t}\n\treturn nil\n}\n\nfunc frameMessage(m *Message) []byte {\n\tvar b bytes.Buffer\n\tm.WriteTo(&b)\n\treturn b.Bytes()\n}\n\nfunc TestConsumerBackoff(t *testing.T) {\n\tmsgIDGood := MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'}\n\tmsgGood := NewMessage(msgIDGood, []byte(\"good\"))\n\n\tmsgIDBad := MessageID{'z', 'x', 'c', 'v', 'b', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'}\n\tmsgBad := NewMessage(msgIDBad, []byte(\"bad\"))\n\n\tscript := []instruction{\n\t\t// SUB\n\t\tinstruction{0, FrameTypeResponse, []byte(\"OK\")},\n\t\t// IDENTIFY\n\t\tinstruction{0, FrameTypeResponse, []byte(\"OK\")},\n\t\tinstruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)},\n\t\tinstruction{20 * 
time.Millisecond, FrameTypeMessage, frameMessage(msgGood)},\n\t\tinstruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)},\n\t\tinstruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgBad)},\n\t\tinstruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgBad)},\n\t\tinstruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)},\n\t\tinstruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)},\n\t\t// needed to exit test\n\t\tinstruction{200 * time.Millisecond, -1, []byte(\"exit\")},\n\t}\n\n\taddr, _ := net.ResolveTCPAddr(\"tcp\", \"127.0.0.1:0\")\n\tn := newMockNSQD(script, addr.String())\n\n\ttopicName := \"test_consumer_commands\" + strconv.Itoa(int(time.Now().Unix()))\n\tconfig := NewConfig()\n\tconfig.MaxInFlight = 5\n\tconfig.BackoffMultiplier = 10 * time.Millisecond\n\tq, _ := NewConsumer(topicName, \"ch\", config)\n\tq.SetLogger(newTestLogger(t), LogLevelDebug)\n\tq.AddHandler(&testHandler{})\n\terr := q.ConnectToNSQD(n.tcpAddr.String())\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\t<-n.exitChan\n\n\tfor i, r := range n.got {\n\t\tlog.Printf(\"%d: %s\", i, r)\n\t}\n\n\texpected := []string{\n\t\t\"IDENTIFY\",\n\t\t\"SUB \" + topicName + \" ch\",\n\t\t\"RDY 5\",\n\t\tfmt.Sprintf(\"FIN %s\", msgIDGood),\n\t\tfmt.Sprintf(\"FIN %s\", msgIDGood),\n\t\tfmt.Sprintf(\"FIN %s\", msgIDGood),\n\t\t\"RDY 5\",\n\t\t\"RDY 0\",\n\t\tfmt.Sprintf(\"REQ %s 0\", msgIDBad),\n\t\t\"RDY 1\",\n\t\t\"RDY 0\",\n\t\tfmt.Sprintf(\"REQ %s 0\", msgIDBad),\n\t\t\"RDY 1\",\n\t\t\"RDY 0\",\n\t\tfmt.Sprintf(\"FIN %s\", msgIDGood),\n\t\t\"RDY 1\",\n\t\t\"RDY 5\",\n\t\tfmt.Sprintf(\"FIN %s\", msgIDGood),\n\t}\n\tif len(n.got) != len(expected) {\n\t\tt.Fatalf(\"we got %d commands != %d expected\", len(n.got), len(expected))\n\t}\n\tfor i, r := range n.got {\n\t\tif string(r) != expected[i] {\n\t\t\tt.Fatalf(\"cmd %d bad %s != %s\", i, r, expected[i])\n\t\t}\n\t}\n}\n\nfunc TestConsumerRequeueNoBackoff(t 
*testing.T) {\n\tmsgIDGood := MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'}\n\tmsgIDRequeue := MessageID{'r', 'e', 'q', 'v', 'b', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'}\n\tmsgIDRequeueNoBackoff := MessageID{'r', 'e', 'q', 'n', 'b', 'a', 'c', 'k', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'}\n\n\tmsgGood := NewMessage(msgIDGood, []byte(\"good\"))\n\tmsgRequeue := NewMessage(msgIDRequeue, []byte(\"requeue\"))\n\tmsgRequeueNoBackoff := NewMessage(msgIDRequeueNoBackoff, []byte(\"requeue_no_backoff_1\"))\n\n\tscript := []instruction{\n\t\t// SUB\n\t\tinstruction{0, FrameTypeResponse, []byte(\"OK\")},\n\t\t// IDENTIFY\n\t\tinstruction{0, FrameTypeResponse, []byte(\"OK\")},\n\t\tinstruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgRequeue)},\n\t\tinstruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgRequeueNoBackoff)},\n\t\tinstruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)},\n\t\t// needed to exit test\n\t\tinstruction{100 * time.Millisecond, -1, []byte(\"exit\")},\n\t}\n\n\taddr, _ := net.ResolveTCPAddr(\"tcp\", \"127.0.0.1:0\")\n\tn := newMockNSQD(script, addr.String())\n\n\ttopicName := \"test_requeue\" + strconv.Itoa(int(time.Now().Unix()))\n\tconfig := NewConfig()\n\tconfig.MaxInFlight = 1\n\tconfig.BackoffMultiplier = 10 * time.Millisecond\n\tq, _ := NewConsumer(topicName, \"ch\", config)\n\tq.SetLogger(newTestLogger(t), LogLevelDebug)\n\tq.AddHandler(&testHandler{})\n\terr := q.ConnectToNSQD(n.tcpAddr.String())\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\tselect {\n\tcase <-n.exitChan:\n\t\tlog.Printf(\"clean exit\")\n\tcase <-time.After(500 * time.Millisecond):\n\t\tlog.Printf(\"timeout\")\n\t}\n\n\tfor i, r := range n.got {\n\t\tlog.Printf(\"%d: %s\", i, r)\n\t}\n\n\texpected := []string{\n\t\t\"IDENTIFY\",\n\t\t\"SUB \" + topicName + \" ch\",\n\t\t\"RDY 1\",\n\t\t\"RDY 1\",\n\t\t\"RDY 0\",\n\t\tfmt.Sprintf(\"REQ %s 0\", 
msgIDRequeue),\n\t\t\"RDY 1\",\n\t\t\"RDY 0\",\n\t\tfmt.Sprintf(\"REQ %s 0\", msgIDRequeueNoBackoff),\n\t\t\"RDY 1\",\n\t\t\"RDY 1\",\n\t\tfmt.Sprintf(\"FIN %s\", msgIDGood),\n\t}\n\tif len(n.got) != len(expected) {\n\t\tt.Fatalf(\"we got %d commands != %d expected\", len(n.got), len(expected))\n\t}\n\tfor i, r := range n.got {\n\t\tif string(r) != expected[i] {\n\t\t\tt.Fatalf(\"cmd %d bad %s != %s\", i, r, expected[i])\n\t\t}\n\t}\n}\n\nfunc TestConsumerBackoffDisconnect(t *testing.T) {\n\tmsgIDGood := MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'}\n\tmsgIDRequeue := MessageID{'r', 'e', 'q', 'v', 'b', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'}\n\n\tmsgGood := NewMessage(msgIDGood, []byte(\"good\"))\n\tmsgRequeue := NewMessage(msgIDRequeue, []byte(\"requeue\"))\n\n\tscript := []instruction{\n\t\t// SUB\n\t\tinstruction{0, FrameTypeResponse, []byte(\"OK\")},\n\t\t// IDENTIFY\n\t\tinstruction{0, FrameTypeResponse, []byte(\"OK\")},\n\t\tinstruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)},\n\t\tinstruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgRequeue)},\n\t\tinstruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgRequeue)},\n\t\tinstruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)},\n\t\t// needed to exit test\n\t\tinstruction{100 * time.Millisecond, -1, []byte(\"exit\")},\n\t}\n\n\taddr, _ := net.ResolveTCPAddr(\"tcp\", \"127.0.0.1:0\")\n\tn := newMockNSQD(script, addr.String())\n\n\ttopicName := \"test_requeue\" + strconv.Itoa(int(time.Now().Unix()))\n\tconfig := NewConfig()\n\tconfig.MaxInFlight = 5\n\tconfig.BackoffMultiplier = 10 * time.Millisecond\n\tconfig.LookupdPollInterval = 10 * time.Millisecond\n\tconfig.RDYRedistributeInterval = 10 * time.Millisecond\n\tq, _ := NewConsumer(topicName, \"ch\", config)\n\tq.SetLogger(newTestLogger(t), LogLevelDebug)\n\tq.AddHandler(&testHandler{})\n\terr := 
q.ConnectToNSQD(n.tcpAddr.String())\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\tselect {\n\tcase <-n.exitChan:\n\t\tlog.Printf(\"clean exit\")\n\tcase <-time.After(500 * time.Millisecond):\n\t\tlog.Printf(\"timeout\")\n\t}\n\n\tfor i, r := range n.got {\n\t\tlog.Printf(\"%d: %s\", i, r)\n\t}\n\n\texpected := []string{\n\t\t\"IDENTIFY\",\n\t\t\"SUB \" + topicName + \" ch\",\n\t\t\"RDY 5\",\n\t\tfmt.Sprintf(\"FIN %s\", msgIDGood),\n\t\t\"RDY 0\",\n\t\tfmt.Sprintf(\"REQ %s 0\", msgIDRequeue),\n\t\t\"RDY 1\",\n\t\t\"RDY 0\",\n\t\tfmt.Sprintf(\"REQ %s 0\", msgIDRequeue),\n\t\t\"RDY 1\",\n\t\t\"RDY 0\",\n\t\tfmt.Sprintf(\"FIN %s\", msgIDGood),\n\t\t\"RDY 1\",\n\t}\n\tif len(n.got) != len(expected) {\n\t\tt.Fatalf(\"we got %d commands != %d expected\", len(n.got), len(expected))\n\t}\n\tfor i, r := range n.got {\n\t\tif string(r) != expected[i] {\n\t\t\tt.Fatalf(\"cmd %d bad %s != %s\", i, r, expected[i])\n\t\t}\n\t}\n\n\tscript = []instruction{\n\t\t// SUB\n\t\tinstruction{0, FrameTypeResponse, []byte(\"OK\")},\n\t\t// IDENTIFY\n\t\tinstruction{0, FrameTypeResponse, []byte(\"OK\")},\n\t\tinstruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)},\n\t\tinstruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)},\n\t\t// needed to exit test\n\t\tinstruction{100 * time.Millisecond, -1, []byte(\"exit\")},\n\t}\n\n\tn = newMockNSQD(script, n.tcpAddr.String())\n\n\tselect {\n\tcase <-n.exitChan:\n\t\tlog.Printf(\"clean exit\")\n\tcase <-time.After(500 * time.Millisecond):\n\t\tlog.Printf(\"timeout\")\n\t}\n\n\tfor i, r := range n.got {\n\t\tlog.Printf(\"%d: %s\", i, r)\n\t}\n\n\texpected = []string{\n\t\t\"IDENTIFY\",\n\t\t\"SUB \" + topicName + \" ch\",\n\t\t\"RDY 1\",\n\t\t\"RDY 5\",\n\t\tfmt.Sprintf(\"FIN %s\", msgIDGood),\n\t\tfmt.Sprintf(\"FIN %s\", msgIDGood),\n\t}\n\tif len(n.got) != len(expected) {\n\t\tt.Fatalf(\"we got %d commands != %d expected\", len(n.got), len(expected))\n\t}\n\tfor i, r := range n.got {\n\t\tif 
string(r) != expected[i] {\n\t\t\tt.Fatalf(\"cmd %d bad %s != %s\", i, r, expected[i])\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/producer.go",
    "content": "package nsq\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n)\n\ntype producerConn interface {\n\tString() string\n\tSetLogger(logger, LogLevel, string)\n\tConnect() (*IdentifyResponse, error)\n\tClose() error\n\tWriteCommand(*Command) error\n}\n\n// Producer is a high-level type to publish to NSQ.\n//\n// A Producer instance is 1:1 with a destination `nsqd`\n// and will lazily connect to that instance (and re-connect)\n// when Publish commands are executed.\ntype Producer struct {\n\tid     int64\n\taddr   string\n\tconn   producerConn\n\tconfig Config\n\n\tlogger   logger\n\tlogLvl   LogLevel\n\tlogGuard sync.RWMutex\n\n\tresponseChan chan []byte\n\terrorChan    chan []byte\n\tcloseChan    chan int\n\n\ttransactionChan chan *ProducerTransaction\n\ttransactions    []*ProducerTransaction\n\tstate           int32\n\n\tconcurrentProducers int32\n\tstopFlag            int32\n\texitChan            chan int\n\twg                  sync.WaitGroup\n\tguard               sync.Mutex\n}\n\n// ProducerTransaction is returned by the async publish methods\n// to retrieve metadata about the command after the\n// response is received.\ntype ProducerTransaction struct {\n\tcmd      *Command\n\tdoneChan chan *ProducerTransaction\n\tError    error         // the error (or nil) of the publish command\n\tArgs     []interface{} // the slice of variadic arguments passed to PublishAsync or MultiPublishAsync\n}\n\nfunc (t *ProducerTransaction) finish() {\n\tif t.doneChan != nil {\n\t\tt.doneChan <- t\n\t}\n}\n\n// NewProducer returns an instance of Producer for the specified address\n//\n// The only valid way to create a Config is via NewConfig, using a struct literal will panic.\n// After Config is passed into NewProducer the values are no longer mutable (they are copied).\nfunc NewProducer(addr string, config *Config) (*Producer, error) {\n\tconfig.assertInitialized()\n\terr := config.Validate()\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\tp := &Producer{\n\t\tid: atomic.AddInt64(&instCount, 1),\n\n\t\taddr:   addr,\n\t\tconfig: *config,\n\n\t\tlogger: log.New(os.Stderr, \"\", log.Flags()),\n\t\tlogLvl: LogLevelInfo,\n\n\t\ttransactionChan: make(chan *ProducerTransaction),\n\t\texitChan:        make(chan int),\n\t\tresponseChan:    make(chan []byte),\n\t\terrorChan:       make(chan []byte),\n\t}\n\treturn p, nil\n}\n\n// Ping causes the Producer to connect to it's configured nsqd (if not already\n// connected) and send a `Nop` command, returning any error that might occur.\n//\n// This method can be used to verify that a newly-created Producer instance is\n// configured correctly, rather than relying on the lazy \"connect on Publish\"\n// behavior of a Producer.\nfunc (w *Producer) Ping() error {\n\tif atomic.LoadInt32(&w.state) != StateConnected {\n\t\terr := w.connect()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn w.conn.WriteCommand(Nop())\n}\n\n// SetLogger assigns the logger to use as well as a level\n//\n// The logger parameter is an interface that requires the following\n// method to be implemented (such as the the stdlib log.Logger):\n//\n//    Output(calldepth int, s string)\n//\nfunc (w *Producer) SetLogger(l logger, lvl LogLevel) {\n\tw.logGuard.Lock()\n\tdefer w.logGuard.Unlock()\n\n\tw.logger = l\n\tw.logLvl = lvl\n}\n\nfunc (w *Producer) getLogger() (logger, LogLevel) {\n\tw.logGuard.RLock()\n\tdefer w.logGuard.RUnlock()\n\n\treturn w.logger, w.logLvl\n}\n\n// String returns the address of the Producer\nfunc (w *Producer) String() string {\n\treturn w.addr\n}\n\n// Stop initiates a graceful stop of the Producer (permanent)\n//\n// NOTE: this blocks until completion\nfunc (w *Producer) Stop() {\n\tw.guard.Lock()\n\tif !atomic.CompareAndSwapInt32(&w.stopFlag, 0, 1) {\n\t\tw.guard.Unlock()\n\t\treturn\n\t}\n\tw.log(LogLevelInfo, \"stopping\")\n\tclose(w.exitChan)\n\tw.close()\n\tw.guard.Unlock()\n\tw.wg.Wait()\n}\n\n// PublishAsync publishes a message body 
to the specified topic\n// but does not wait for the response from `nsqd`.\n//\n// When the Producer eventually receives the response from `nsqd`,\n// the supplied `doneChan` (if specified)\n// will receive a `ProducerTransaction` instance with the supplied variadic arguments\n// and the response error if present\nfunc (w *Producer) PublishAsync(topic string, body []byte, doneChan chan *ProducerTransaction,\n\targs ...interface{}) error {\n\treturn w.sendCommandAsync(Publish(topic, body), doneChan, args)\n}\n\n// MultiPublishAsync publishes a slice of message bodies to the specified topic\n// but does not wait for the response from `nsqd`.\n//\n// When the Producer eventually receives the response from `nsqd`,\n// the supplied `doneChan` (if specified)\n// will receive a `ProducerTransaction` instance with the supplied variadic arguments\n// and the response error if present\nfunc (w *Producer) MultiPublishAsync(topic string, body [][]byte, doneChan chan *ProducerTransaction,\n\targs ...interface{}) error {\n\tcmd, err := MultiPublish(topic, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn w.sendCommandAsync(cmd, doneChan, args)\n}\n\n// Publish synchronously publishes a message body to the specified topic, returning\n// an error if publish failed\nfunc (w *Producer) Publish(topic string, body []byte) error {\n\treturn w.sendCommand(Publish(topic, body))\n}\n\n// MultiPublish synchronously publishes a slice of message bodies to the specified topic, returning\n// an error if publish failed\nfunc (w *Producer) MultiPublish(topic string, body [][]byte) error {\n\tcmd, err := MultiPublish(topic, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn w.sendCommand(cmd)\n}\n\nfunc (w *Producer) sendCommand(cmd *Command) error {\n\tdoneChan := make(chan *ProducerTransaction)\n\terr := w.sendCommandAsync(cmd, doneChan, nil)\n\tif err != nil {\n\t\tclose(doneChan)\n\t\treturn err\n\t}\n\tt := <-doneChan\n\treturn t.Error\n}\n\nfunc (w *Producer) sendCommandAsync(cmd 
*Command, doneChan chan *ProducerTransaction,\n\targs []interface{}) error {\n\t// keep track of how many outstanding producers we're dealing with\n\t// in order to later ensure that we clean them all up...\n\tatomic.AddInt32(&w.concurrentProducers, 1)\n\tdefer atomic.AddInt32(&w.concurrentProducers, -1)\n\n\tif atomic.LoadInt32(&w.state) != StateConnected {\n\t\terr := w.connect()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tt := &ProducerTransaction{\n\t\tcmd:      cmd,\n\t\tdoneChan: doneChan,\n\t\tArgs:     args,\n\t}\n\n\tselect {\n\tcase w.transactionChan <- t:\n\tcase <-w.exitChan:\n\t\treturn ErrStopped\n\t}\n\n\treturn nil\n}\n\nfunc (w *Producer) connect() error {\n\tw.guard.Lock()\n\tdefer w.guard.Unlock()\n\n\tif atomic.LoadInt32(&w.stopFlag) == 1 {\n\t\treturn ErrStopped\n\t}\n\n\tswitch state := atomic.LoadInt32(&w.state); state {\n\tcase StateInit:\n\tcase StateConnected:\n\t\treturn nil\n\tdefault:\n\t\treturn ErrNotConnected\n\t}\n\n\tw.log(LogLevelInfo, \"(%s) connecting to nsqd\", w.addr)\n\n\tlogger, logLvl := w.getLogger()\n\n\tw.conn = NewConn(w.addr, &w.config, &producerConnDelegate{w})\n\tw.conn.SetLogger(logger, logLvl, fmt.Sprintf(\"%3d (%%s)\", w.id))\n\n\t_, err := w.conn.Connect()\n\tif err != nil {\n\t\tw.conn.Close()\n\t\tw.log(LogLevelError, \"(%s) error connecting to nsqd - %s\", w.addr, err)\n\t\treturn err\n\t}\n\tatomic.StoreInt32(&w.state, StateConnected)\n\tw.closeChan = make(chan int)\n\tw.wg.Add(1)\n\tgo w.router()\n\n\treturn nil\n}\n\nfunc (w *Producer) close() {\n\tif !atomic.CompareAndSwapInt32(&w.state, StateConnected, StateDisconnected) {\n\t\treturn\n\t}\n\tw.conn.Close()\n\tgo func() {\n\t\t// we need to handle this in a goroutine so we don't\n\t\t// block the caller from making progress\n\t\tw.wg.Wait()\n\t\tatomic.StoreInt32(&w.state, StateInit)\n\t}()\n}\n\nfunc (w *Producer) router() {\n\tfor {\n\t\tselect {\n\t\tcase t := <-w.transactionChan:\n\t\t\tw.transactions = append(w.transactions, t)\n\t\t\terr 
:= w.conn.WriteCommand(t.cmd)\n\t\t\tif err != nil {\n\t\t\t\tw.log(LogLevelError, \"(%s) sending command - %s\", w.conn.String(), err)\n\t\t\t\tw.close()\n\t\t\t}\n\t\tcase data := <-w.responseChan:\n\t\t\tw.popTransaction(FrameTypeResponse, data)\n\t\tcase data := <-w.errorChan:\n\t\t\tw.popTransaction(FrameTypeError, data)\n\t\tcase <-w.closeChan:\n\t\t\tgoto exit\n\t\tcase <-w.exitChan:\n\t\t\tgoto exit\n\t\t}\n\t}\n\nexit:\n\tw.transactionCleanup()\n\tw.wg.Done()\n\tw.log(LogLevelInfo, \"exiting router\")\n}\n\nfunc (w *Producer) popTransaction(frameType int32, data []byte) {\n\tt := w.transactions[0]\n\tw.transactions = w.transactions[1:]\n\tif frameType == FrameTypeError {\n\t\tt.Error = ErrProtocol{string(data)}\n\t}\n\tt.finish()\n}\n\nfunc (w *Producer) transactionCleanup() {\n\t// clean up transactions we can easily account for\n\tfor _, t := range w.transactions {\n\t\tt.Error = ErrNotConnected\n\t\tt.finish()\n\t}\n\tw.transactions = w.transactions[:0]\n\n\t// spin and free up any writes that might have raced\n\t// with the cleanup process (blocked on writing\n\t// to transactionChan)\n\tfor {\n\t\tselect {\n\t\tcase t := <-w.transactionChan:\n\t\t\tt.Error = ErrNotConnected\n\t\t\tt.finish()\n\t\tdefault:\n\t\t\t// keep spinning until there are 0 concurrent producers\n\t\t\tif atomic.LoadInt32(&w.concurrentProducers) == 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// give the runtime a chance to schedule other racing goroutines\n\t\t\ttime.Sleep(5 * time.Millisecond)\n\t\t}\n\t}\n}\n\nfunc (w *Producer) log(lvl LogLevel, line string, args ...interface{}) {\n\tlogger, logLvl := w.getLogger()\n\n\tif logger == nil {\n\t\treturn\n\t}\n\n\tif logLvl > lvl {\n\t\treturn\n\t}\n\n\tlogger.Output(2, fmt.Sprintf(\"%-4s %3d %s\", lvl, w.id, fmt.Sprintf(line, args...)))\n}\n\nfunc (w *Producer) onConnResponse(c *Conn, data []byte) { w.responseChan <- data }\nfunc (w *Producer) onConnError(c *Conn, data []byte)    { w.errorChan <- data }\nfunc (w *Producer) 
onConnHeartbeat(c *Conn)             {}\nfunc (w *Producer) onConnIOError(c *Conn, err error)    { w.close() }\nfunc (w *Producer) onConnClose(c *Conn) {\n\tw.guard.Lock()\n\tdefer w.guard.Unlock()\n\tclose(w.closeChan)\n}\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/producer_test.go",
    "content": "package nsq\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n)\n\ntype ConsumerHandler struct {\n\tt              *testing.T\n\tq              *Consumer\n\tmessagesGood   int\n\tmessagesFailed int\n}\n\nfunc (h *ConsumerHandler) LogFailedMessage(message *Message) {\n\th.messagesFailed++\n\th.q.Stop()\n}\n\nfunc (h *ConsumerHandler) HandleMessage(message *Message) error {\n\tmsg := string(message.Body)\n\tif msg == \"bad_test_case\" {\n\t\treturn errors.New(\"fail this message\")\n\t}\n\tif msg != \"multipublish_test_case\" && msg != \"publish_test_case\" {\n\t\th.t.Error(\"message 'action' was not correct:\", msg)\n\t}\n\th.messagesGood++\n\treturn nil\n}\n\nfunc TestProducerConnection(t *testing.T) {\n\tconfig := NewConfig()\n\tladdr := \"127.0.0.2\"\n\n\tconfig.LocalAddr, _ = net.ResolveTCPAddr(\"tcp\", laddr+\":0\")\n\n\tw, _ := NewProducer(\"127.0.0.1:4150\", config)\n\tw.SetLogger(nullLogger, LogLevelInfo)\n\n\terr := w.Publish(\"write_test\", []byte(\"test\"))\n\tif err != nil {\n\t\tt.Fatalf(\"should lazily connect - %s\", err)\n\t}\n\n\tconn := w.conn.(*Conn)\n\tif !strings.HasPrefix(conn.conn.LocalAddr().String(), laddr) {\n\t\tt.Fatal(\"producer connection should be bound to specified address:\", conn.conn.LocalAddr())\n\t}\n\n\tw.Stop()\n\n\terr = w.Publish(\"write_test\", []byte(\"fail test\"))\n\tif err != ErrStopped {\n\t\tt.Fatalf(\"should not be able to write after Stop()\")\n\t}\n}\n\nfunc TestProducerPing(t *testing.T) {\n\tlog.SetOutput(ioutil.Discard)\n\tdefer log.SetOutput(os.Stdout)\n\n\tconfig := NewConfig()\n\tw, _ := NewProducer(\"127.0.0.1:4150\", config)\n\tw.SetLogger(nullLogger, LogLevelInfo)\n\n\terr := w.Ping()\n\n\tif err != nil {\n\t\tt.Fatalf(\"should connect on ping\")\n\t}\n\n\tw.Stop()\n\n\terr = w.Ping()\n\tif err != ErrStopped {\n\t\tt.Fatalf(\"should not be able to ping 
after Stop()\")\n\t}\n}\n\nfunc TestProducerPublish(t *testing.T) {\n\ttopicName := \"publish\" + strconv.Itoa(int(time.Now().Unix()))\n\tmsgCount := 10\n\n\tconfig := NewConfig()\n\tw, _ := NewProducer(\"127.0.0.1:4150\", config)\n\tw.SetLogger(nullLogger, LogLevelInfo)\n\tdefer w.Stop()\n\n\tfor i := 0; i < msgCount; i++ {\n\t\terr := w.Publish(topicName, []byte(\"publish_test_case\"))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error %s\", err)\n\t\t}\n\t}\n\n\terr := w.Publish(topicName, []byte(\"bad_test_case\"))\n\tif err != nil {\n\t\tt.Fatalf(\"error %s\", err)\n\t}\n\n\treadMessages(topicName, t, msgCount)\n}\n\nfunc TestProducerMultiPublish(t *testing.T) {\n\ttopicName := \"multi_publish\" + strconv.Itoa(int(time.Now().Unix()))\n\tmsgCount := 10\n\n\tconfig := NewConfig()\n\tw, _ := NewProducer(\"127.0.0.1:4150\", config)\n\tw.SetLogger(nullLogger, LogLevelInfo)\n\tdefer w.Stop()\n\n\tvar testData [][]byte\n\tfor i := 0; i < msgCount; i++ {\n\t\ttestData = append(testData, []byte(\"multipublish_test_case\"))\n\t}\n\n\terr := w.MultiPublish(topicName, testData)\n\tif err != nil {\n\t\tt.Fatalf(\"error %s\", err)\n\t}\n\n\terr = w.Publish(topicName, []byte(\"bad_test_case\"))\n\tif err != nil {\n\t\tt.Fatalf(\"error %s\", err)\n\t}\n\n\treadMessages(topicName, t, msgCount)\n}\n\nfunc TestProducerPublishAsync(t *testing.T) {\n\ttopicName := \"async_publish\" + strconv.Itoa(int(time.Now().Unix()))\n\tmsgCount := 10\n\n\tconfig := NewConfig()\n\tw, _ := NewProducer(\"127.0.0.1:4150\", config)\n\tw.SetLogger(nullLogger, LogLevelInfo)\n\tdefer w.Stop()\n\n\tresponseChan := make(chan *ProducerTransaction, msgCount)\n\tfor i := 0; i < msgCount; i++ {\n\t\terr := w.PublishAsync(topicName, []byte(\"publish_test_case\"), responseChan, \"test\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\t}\n\n\tfor i := 0; i < msgCount; i++ {\n\t\ttrans := <-responseChan\n\t\tif trans.Error != nil {\n\t\t\tt.Fatalf(trans.Error.Error())\n\t\t}\n\t\tif trans.Args[0].(string) 
!= \"test\" {\n\t\t\tt.Fatalf(`proxied arg \"%s\" != \"test\"`, trans.Args[0].(string))\n\t\t}\n\t}\n\n\terr := w.Publish(topicName, []byte(\"bad_test_case\"))\n\tif err != nil {\n\t\tt.Fatalf(\"error %s\", err)\n\t}\n\n\treadMessages(topicName, t, msgCount)\n}\n\nfunc TestProducerMultiPublishAsync(t *testing.T) {\n\ttopicName := \"multi_publish\" + strconv.Itoa(int(time.Now().Unix()))\n\tmsgCount := 10\n\n\tconfig := NewConfig()\n\tw, _ := NewProducer(\"127.0.0.1:4150\", config)\n\tw.SetLogger(nullLogger, LogLevelInfo)\n\tdefer w.Stop()\n\n\tvar testData [][]byte\n\tfor i := 0; i < msgCount; i++ {\n\t\ttestData = append(testData, []byte(\"multipublish_test_case\"))\n\t}\n\n\tresponseChan := make(chan *ProducerTransaction)\n\terr := w.MultiPublishAsync(topicName, testData, responseChan, \"test0\", 1)\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\ttrans := <-responseChan\n\tif trans.Error != nil {\n\t\tt.Fatalf(trans.Error.Error())\n\t}\n\tif trans.Args[0].(string) != \"test0\" {\n\t\tt.Fatalf(`proxied arg \"%s\" != \"test0\"`, trans.Args[0].(string))\n\t}\n\tif trans.Args[1].(int) != 1 {\n\t\tt.Fatalf(`proxied arg %d != 1`, trans.Args[1].(int))\n\t}\n\n\terr = w.Publish(topicName, []byte(\"bad_test_case\"))\n\tif err != nil {\n\t\tt.Fatalf(\"error %s\", err)\n\t}\n\n\treadMessages(topicName, t, msgCount)\n}\n\nfunc TestProducerHeartbeat(t *testing.T) {\n\ttopicName := \"heartbeat\" + strconv.Itoa(int(time.Now().Unix()))\n\n\tconfig := NewConfig()\n\tconfig.HeartbeatInterval = 100 * time.Millisecond\n\tw, _ := NewProducer(\"127.0.0.1:4150\", config)\n\tw.SetLogger(nullLogger, LogLevelInfo)\n\tdefer w.Stop()\n\n\terr := w.Publish(topicName, []byte(\"publish_test_case\"))\n\tif err == nil {\n\t\tt.Fatalf(\"error should not be nil\")\n\t}\n\tif identifyError, ok := err.(ErrIdentify); !ok ||\n\t\tidentifyError.Reason != \"E_BAD_BODY IDENTIFY heartbeat interval (100) is invalid\" {\n\t\tt.Fatalf(\"wrong error - %s\", err)\n\t}\n\n\tconfig = 
NewConfig()\n\tconfig.HeartbeatInterval = 1000 * time.Millisecond\n\tw, _ = NewProducer(\"127.0.0.1:4150\", config)\n\tw.SetLogger(nullLogger, LogLevelInfo)\n\tdefer w.Stop()\n\n\terr = w.Publish(topicName, []byte(\"publish_test_case\"))\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\ttime.Sleep(1100 * time.Millisecond)\n\n\tmsgCount := 10\n\tfor i := 0; i < msgCount; i++ {\n\t\terr := w.Publish(topicName, []byte(\"publish_test_case\"))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error %s\", err)\n\t\t}\n\t}\n\n\terr = w.Publish(topicName, []byte(\"bad_test_case\"))\n\tif err != nil {\n\t\tt.Fatalf(\"error %s\", err)\n\t}\n\n\treadMessages(topicName, t, msgCount+1)\n}\n\nfunc readMessages(topicName string, t *testing.T, msgCount int) {\n\tconfig := NewConfig()\n\tconfig.DefaultRequeueDelay = 0\n\tconfig.MaxBackoffDuration = 50 * time.Millisecond\n\tq, _ := NewConsumer(topicName, \"ch\", config)\n\tq.SetLogger(nullLogger, LogLevelInfo)\n\n\th := &ConsumerHandler{\n\t\tt: t,\n\t\tq: q,\n\t}\n\tq.AddHandler(h)\n\n\terr := q.ConnectToNSQD(\"127.0.0.1:4150\")\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\t<-q.StopChan\n\n\tif h.messagesGood != msgCount {\n\t\tt.Fatalf(\"end of test. 
should have handled a diff number of messages %d != %d\", h.messagesGood, msgCount)\n\t}\n\n\tif h.messagesFailed != 1 {\n\t\tt.Fatal(\"failed message not done\")\n\t}\n}\n\ntype mockProducerConn struct {\n\tdelegate ConnDelegate\n\tcloseCh  chan struct{}\n\tpubCh    chan struct{}\n}\n\nfunc newMockProducerConn(delegate ConnDelegate) producerConn {\n\tm := &mockProducerConn{\n\t\tdelegate: delegate,\n\t\tcloseCh:  make(chan struct{}),\n\t\tpubCh:    make(chan struct{}, 4),\n\t}\n\tgo m.router()\n\treturn m\n}\n\nfunc (m *mockProducerConn) String() string {\n\treturn \"127.0.0.1:0\"\n}\n\nfunc (m *mockProducerConn) SetLogger(logger logger, level LogLevel, prefix string) {}\n\nfunc (m *mockProducerConn) Connect() (*IdentifyResponse, error) {\n\treturn &IdentifyResponse{}, nil\n}\n\nfunc (m *mockProducerConn) Close() error {\n\tclose(m.closeCh)\n\treturn nil\n}\n\nfunc (m *mockProducerConn) WriteCommand(cmd *Command) error {\n\tif bytes.Equal(cmd.Name, []byte(\"PUB\")) {\n\t\tm.pubCh <- struct{}{}\n\t}\n\treturn nil\n}\n\nfunc (m *mockProducerConn) router() {\n\tfor {\n\t\tselect {\n\t\tcase <-m.closeCh:\n\t\t\tgoto exit\n\t\tcase <-m.pubCh:\n\t\t\tm.delegate.OnResponse(nil, framedResponse(FrameTypeResponse, []byte(\"OK\")))\n\t\t}\n\t}\nexit:\n}\n\nfunc BenchmarkProducer(b *testing.B) {\n\tb.StopTimer()\n\tbody := make([]byte, 512)\n\n\tconfig := NewConfig()\n\tp, _ := NewProducer(\"127.0.0.1:0\", config)\n\n\tp.conn = newMockProducerConn(&producerConnDelegate{p})\n\tatomic.StoreInt32(&p.state, StateConnected)\n\tp.closeChan = make(chan int)\n\tgo p.router()\n\n\tstartCh := make(chan struct{})\n\tvar wg sync.WaitGroup\n\tparallel := runtime.GOMAXPROCS(0)\n\n\tfor j := 0; j < parallel; j++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\t<-startCh\n\t\t\tfor i := 0; i < b.N/parallel; i++ {\n\t\t\t\tp.Publish(\"test\", body)\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tb.StartTimer()\n\tclose(startCh)\n\twg.Wait()\n}\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/protocol.go",
    "content": "package nsq\n\nimport (\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"io\"\n\t\"regexp\"\n)\n\n// MagicV1 is the initial identifier sent when connecting for V1 clients\nvar MagicV1 = []byte(\"  V1\")\n\n// MagicV2 is the initial identifier sent when connecting for V2 clients\nvar MagicV2 = []byte(\"  V2\")\n\n// frame types\nconst (\n\tFrameTypeResponse int32 = 0\n\tFrameTypeError    int32 = 1\n\tFrameTypeMessage  int32 = 2\n)\n\nvar validTopicChannelNameRegex = regexp.MustCompile(`^[\\.a-zA-Z0-9_-]+(#ephemeral)?$`)\n\n// IsValidTopicName checks a topic name for correctness\nfunc IsValidTopicName(name string) bool {\n\treturn isValidName(name)\n}\n\n// IsValidChannelName checks a channel name for correctness\nfunc IsValidChannelName(name string) bool {\n\treturn isValidName(name)\n}\n\nfunc isValidName(name string) bool {\n\tif len(name) > 64 || len(name) < 1 {\n\t\treturn false\n\t}\n\treturn validTopicChannelNameRegex.MatchString(name)\n}\n\n// ReadResponse is a client-side utility function to read from the supplied Reader\n// according to the NSQ protocol spec:\n//\n//    [x][x][x][x][x][x][x][x]...\n//    |  (int32) || (binary)\n//    |  4-byte  || N-byte\n//    ------------------------...\n//        size       data\nfunc ReadResponse(r io.Reader) ([]byte, error) {\n\tvar msgSize int32\n\n\t// message size\n\terr := binary.Read(r, binary.BigEndian, &msgSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// message binary data\n\tbuf := make([]byte, msgSize)\n\t_, err = io.ReadFull(r, buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf, nil\n}\n\n// UnpackResponse is a client-side utility function that unpacks serialized data\n// according to NSQ protocol spec:\n//\n//    [x][x][x][x][x][x][x][x]...\n//    |  (int32) || (binary)\n//    |  4-byte  || N-byte\n//    ------------------------...\n//      frame ID     data\n//\n// Returns a triplicate of: frame type, data ([]byte), error\nfunc UnpackResponse(response []byte) (int32, 
[]byte, error) {\n\tif len(response) < 4 {\n\t\treturn -1, nil, errors.New(\"length of response is too small\")\n\t}\n\n\treturn int32(binary.BigEndian.Uint32(response)), response[4:], nil\n}\n\n// ReadUnpackedResponse reads and parses data from the underlying\n// TCP connection according to the NSQ TCP protocol spec and\n// returns the frameType, data or error\nfunc ReadUnpackedResponse(r io.Reader) (int32, []byte, error) {\n\tresp, err := ReadResponse(r)\n\tif err != nil {\n\t\treturn -1, nil, err\n\t}\n\treturn UnpackResponse(resp)\n}\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/states.go",
    "content": "package nsq\n\n// states\nconst (\n\tStateInit = iota\n\tStateDisconnected\n\tStateConnected\n\tStateSubscribed\n\t// StateClosing means CLOSE has started...\n\t// (responses are ok, but no new messages will be sent)\n\tStateClosing\n)\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/test/ca.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIID9zCCAt+gAwIBAgIJAPYpAVNDj2lgMA0GCSqGSIb3DQEBBQUAMIGRMQswCQYD\nVQQGEwJERTEMMAoGA1UECAwDTlJXMQ4wDAYDVQQHDAVFYXJ0aDEXMBUGA1UECgwO\nUmFuZG9tIENvbXBhbnkxCzAJBgNVBAsMAklUMRcwFQYDVQQDDA53d3cucmFuZG9t\nLmNvbTElMCMGCSqGSIb3DQEJARYWS3J5cHRvS2luZ3NAcmFuZG9tLmNvbTAeFw0x\nNDA0MDIyMTE0NTJaFw0xNTA0MDIyMTE0NTJaMIGRMQswCQYDVQQGEwJERTEMMAoG\nA1UECAwDTlJXMQ4wDAYDVQQHDAVFYXJ0aDEXMBUGA1UECgwOUmFuZG9tIENvbXBh\nbnkxCzAJBgNVBAsMAklUMRcwFQYDVQQDDA53d3cucmFuZG9tLmNvbTElMCMGCSqG\nSIb3DQEJARYWS3J5cHRvS2luZ3NAcmFuZG9tLmNvbTCCASIwDQYJKoZIhvcNAQEB\nBQADggEPADCCAQoCggEBAL/sJU6ODQCsdWAmq3Qyp6vCqVFkSIHwR3oH8vPuwwob\nIOrx/pXz2LIRekQ4egT8LCH3QDxhEvFhDNXYM4h/mkQ+GpgzynoIqYrw+yF93pik\nT9Tpel2IuntThlZrO/4APRbVw4Ihf3zp47AY71J+8usJjmfWsId4dhqa1lTYecXK\nZwxii8RTH/7LsuwIDOW1QJLGGKNzvVYA42Gh8Cw3uHlmqZ2tA/sp5qg1Z3QU5g7y\nEzzRybotHaRb5XMUWHAlGbIl/TW4KlFqFZ0kCXJXL1uO3uq2nIS3bG7ryjbobRVn\ndZ6sV34eenIeZWu6zlDxQP/EqxAezz5Ndyt9uYWb/JECAwEAAaNQME4wHQYDVR0O\nBBYEFI9l/QHE30clqx+1oCR6IhUYEdqLMB8GA1UdIwQYMBaAFI9l/QHE30clqx+1\noCR6IhUYEdqLMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAES6GKxL\noeCLZa83TjJXLagcc9mmdQZgfF3/o61+ye7D9BLqBwN5lx4+kIE1LAUI/Id0mCdW\n9uXmPhpCJ926krahNc4Ol+wQTmZ3j7Mn3DCkFufjr64cGPU/UzH4yjMg9wEf84qz\n5oH+dBifwJM8yoRCxbnMqGBu3xY8WCjPlw8E8lizXFk8wUbLZ/EC5Rjm+KmdT5ud\nKTEgM+K6RMNo9vLn5ZasrYyhVcHdEKIzo6qLm1ZVIgpi/1WX0m8hACMfEcqee6ot\n76LEyM3kwfqRkWGZWHEF9D4emp3quU+0AmjM57LHrYjidpDJkVTUHDoMBFHl9Uiq\n0O9+azN48F/bVgU=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/test/server.key",
    "content": "-----BEGIN PRIVATE KEY-----\nMIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDFHWaPfRA5nU/F\nE8AVoFj2TAgMRISLduWlbAgDnMtFLSGVwgjxU13Txsv0LgwJgo4A5xpd2WNV0dIQ\nbrerxvPVJruKO8KxKFS2U58BCFIG0xGrlQSg5wDGyqxEQY80XlrBtxs81v79GYHy\nfBhizg7onlmbNZzxPy7idU0a7TpgzakeDrfJHQ7rI3llvR0U0TdOLno82CtPvosY\n6TYZAIFYgH05yN7DWKuDUI8Fa2RFVkbHPUlJVKROw/0n1yWy7XcwTmQQyaodFYgg\nKMCdyR0ElPxLv8dKYFjLvef2DTmuYwbalt5hiQqOpY1wm616Xf4ywz2uEU+ooLW4\n/Q6DcRUBAgMBAAECggEBAKDUgVc4YuGvkmOp3sOGhq/Hj5V63m7wvyV/BPb1mwFB\ndrK7lBJbxCXEgaslWxrzVCw2ZFQIyL1AKtbPj7tv5ImZRdHfOtbTonL1vbnY8Ryy\nYSuPtiwW75JD0dULbO38wq5mWaoFfh5DDr0sNbWAjbeNZG14jCpnNDkAHr6Gq2hJ\nVzwEwy+W7LXn8s8lYJHi3MsxCJqAFN1u2FOkjBtrcVW06VgV87IX59SOFns80x4E\nNn0ZKH7RU2DuJ7Fm4HtaNH+yaDYxUeo2A/2/UoavyYYCgC5gThsNjlp9/R4gtm0h\nVO+8cN5a3s7zL+aITIusY7H/rwWc6XpRmxQn+jwqF8ECgYEA5PpAz4fhXA9abFZx\n0XqCgcwTFY5gTX/JDd1qv0b/PueAR7dY5d37bRbTXpzrHiNFVrq3x432V3+KY0b5\n55PEB1YxwBUga5DvTSa5fLfUibvLpdZjganzdTOsG53wMvNwUT8iUzUQDLkyRfIi\nmV0r4Sa34RrBZdWJ2Aou9by2SlkCgYEA3GCHTP7nAcuHXRTsXH3eK/HsfwxdwjhA\nG5SG7L7KSoMpzCbe90DuYEr6J/O1nnP0QiSQ2uEeTOARzMfio4E16exWlDDtvPBQ\nHqSuQKg4M7fMTN1tj95xmk1yGZMyPxgEfCScBeCbYQzOyZ0j93iFjqMnb2mlriq5\nMoSPat3BeukCgYEAjSGaFNABnUZxS1k0qhLCodHw6VZqERp0B7Gze9X8uP7jWFCv\n4G6j66cn/KbnXBoNQNmxMLRVY7TezTYQDiZLquH7pBLheqtIc9ssdKyxuXsgmES9\n7EueHV0N9a+xPxZA4jLxqyuHivATBn2pybFdvFaq+3oMPgISBjCwpRH9oXECgYAN\n+n16j8ydW4iZieM4Nq+p/+1tXZ5w3FqMpU4tpCh2s30qOuj3rAGyz+6wLBBAHcDH\nlUQu7gqa+7eFUsR4dJCz5s7TFYtu6ZtbZjy7UzBFb4og8gaqEoUIMZNkNecBA4f9\nS+EtqkKQ1Fwlg7ctUlK+anDs6zmcI4+dubTTJX/JSQKBgQCsu/gCgoOi2GFgebIh\nURvEMrhaiHxcw5u30nMNjWUGpDQK3lVTK51+7wj4xmVfiomvUW6M/HaR2+5xF1U1\nQV08cKeWCGfGUFetTxjdhsVhMIk84ygF2l9K6jiHqvtd5rIoQ9Lf8XXbYaQVicRg\nqmB2iOzmbQQM/GOSofAeUfE7/A==\n-----END PRIVATE KEY-----\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/test/server.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIID7zCCAtegAwIBAgIJAMsErP97ZQmgMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYD\nVQQGEwJERTEMMAoGA1UECAwDTlJXMQ4wDAYDVQQHDAVFYXJ0aDEXMBUGA1UECgwO\nUmFuZG9tIENvbXBhbnkxCzAJBgNVBAsMAklUMRcwFQYDVQQDDA53d3cucmFuZG9t\nLmNvbTEhMB8GCSqGSIb3DQEJARYSZm9vYmFyQGV4YW1wbGUuY29tMB4XDTE0MDQw\nMjIxMTQ1MloXDTI0MDMzMDIxMTQ1MlowgY0xCzAJBgNVBAYTAkRFMQwwCgYDVQQI\nDANOUlcxDjAMBgNVBAcMBUVhcnRoMRcwFQYDVQQKDA5SYW5kb20gQ29tcGFueTEL\nMAkGA1UECwwCSVQxFzAVBgNVBAMMDnd3dy5yYW5kb20uY29tMSEwHwYJKoZIhvcN\nAQkBFhJmb29iYXJAZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw\nggEKAoIBAQDFHWaPfRA5nU/FE8AVoFj2TAgMRISLduWlbAgDnMtFLSGVwgjxU13T\nxsv0LgwJgo4A5xpd2WNV0dIQbrerxvPVJruKO8KxKFS2U58BCFIG0xGrlQSg5wDG\nyqxEQY80XlrBtxs81v79GYHyfBhizg7onlmbNZzxPy7idU0a7TpgzakeDrfJHQ7r\nI3llvR0U0TdOLno82CtPvosY6TYZAIFYgH05yN7DWKuDUI8Fa2RFVkbHPUlJVKRO\nw/0n1yWy7XcwTmQQyaodFYggKMCdyR0ElPxLv8dKYFjLvef2DTmuYwbalt5hiQqO\npY1wm616Xf4ywz2uEU+ooLW4/Q6DcRUBAgMBAAGjUDBOMB0GA1UdDgQWBBTxyT32\nExu5TuortZY8zkVotLDNDTAfBgNVHSMEGDAWgBTxyT32Exu5TuortZY8zkVotLDN\nDTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAu+0B+caaV4HzIHyfX\nZc6BUPcRoTEZIWX/7JLeeOVUztOjl9ExjYTzjo0QEt+PVcOzfQL/hxE2SPG6fRF7\nYRZU1h9t5Ti9rTg9myAbGGMo6MdWZULFcxIWjxhv6qnFPk/fF47PvGwjygFNnzv8\nFYmrAI99kK0CYolvXZ5ue250dpE/TCIAyk09a3WeBbHU/hMR/mBUNsitphelDbNK\noohrY9D7QR5Mf/NZgx3a0eDH6zoMYDRPARY3M02EuHHiRKmlyfnPv4ns4/0wCarj\npKpds+G80+k2fyiMgQ5bPTw8sfNgq1z0IvIuWB36XSNenTgnnjArbWii+x95jjNw\nXcQg\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/test.sh",
    "content": "#!/bin/bash\nset -e\n\n# a helper script to run tests\n\nif ! which nsqd >/dev/null; then\n    echo \"missing nsqd binary\" && exit 1\nfi\n\nif ! which nsqlookupd >/dev/null; then\n    echo \"missing nsqlookupd binary\" && exit 1\nfi\n\n# run nsqlookupd\nLOOKUP_LOGFILE=$(mktemp -t nsqlookupd.XXXXXXX)\necho \"starting nsqlookupd\"\necho \"  logging to $LOOKUP_LOGFILE\"\nnsqlookupd >$LOOKUP_LOGFILE 2>&1 &\nLOOKUPD_PID=$!\n\n# run nsqd configured to use our lookupd above\nrm -f *.dat\nNSQD_LOGFILE=$(mktemp -t nsqlookupd.XXXXXXX)\nEXTRA_ARGS=\"--tls-root-ca-file=./test/ca.pem\"\nif [[ $NSQ_DOWNLOAD == nsq-0.2.24* ]] || [[ $NSQ_DOWNLOAD == nsq-0.2.27* ]]; then\n    EXTRA_ARGS=\"\"\nfi\necho \"starting nsqd --data-path=/tmp --lookupd-tcp-address=127.0.0.1:4160 --tls-cert=./test/server.pem --tls-key=./test/server.key $EXTRA_ARGS\"\necho \"  logging to $NSQD_LOGFILE\"\nnsqd --data-path=/tmp --lookupd-tcp-address=127.0.0.1:4160 --tls-cert=./test/server.pem --tls-key=./test/server.key $EXTRA_ARGS >$NSQD_LOGFILE 2>&1 &\nNSQD_PID=$!\n\nsleep 0.3\n\ncleanup() {\n    echo \"killing nsqd PID $NSQD_PID\"\n    kill -s TERM $NSQD_PID || cat $NSQD_LOGFILE\n    echo \"killing nsqlookupd PID $LOOKUPD_PID\"\n    kill -s TERM $LOOKUPD_PID || cat $LOOKUP_LOGFILE\n}\ntrap cleanup INT TERM EXIT\n\ngo test -v -timeout 60s\n"
  },
  {
    "path": "vendor/github.com/bitly/go-nsq/version.go",
    "content": "// Package nsq is the official Go package for NSQ (http://nsq.io/)\n//\n// It provides high-level Consumer and Producer types as well as low-level\n// functions to communicate over the NSQ protocol\npackage nsq\n\n// VERSION\nconst VERSION = \"1.0.5-alpha\"\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/LICENSE.txt",
    "content": "Copyright (c) 2012, Cloud Instruments Co., Ltd. <info@cin.io>\r\nAll rights reserved.\r\n\r\nRedistribution and use in source and binary forms, with or without\r\nmodification, are permitted provided that the following conditions are met:\r\n    * Redistributions of source code must retain the above copyright\r\n      notice, this list of conditions and the following disclaimer.\r\n    * Redistributions in binary form must reproduce the above copyright\r\n      notice, this list of conditions and the following disclaimer in the\r\n      documentation and/or other materials provided with the distribution.\r\n    * Neither the name of the Cloud Instruments Co., Ltd. nor the\r\n      names of its contributors may be used to endorse or promote products\r\n      derived from this software without specific prior written permission.\r\n\r\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\r\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\r\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r\nDISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\r\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\r\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\r\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\r\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/README.markdown",
    "content": "Seelog\n=======\n\nSeelog is a powerful and easy-to-learn logging framework that provides functionality for flexible dispatching, filtering, and formatting log messages.\nIt is natively written in the [Go](http://golang.org/) programming language. \n\n[![Build Status](https://drone.io/github.com/cihub/seelog/status.png)](https://drone.io/github.com/cihub/seelog/latest)\n\nFeatures\n------------------\n\n* Xml configuring to be able to change logger parameters without recompilation\n* Changing configurations on the fly without app restart\n* Possibility to set different log configurations for different project files and functions\n* Adjustable message formatting\n* Simultaneous log output to multiple streams\n* Choosing logger priority strategy to minimize performance hit\n* Different output writers\n  * Console writer\n  * File writer \n  * Buffered writer (Chunk writer)\n  * Rolling log writer (Logging with rotation)\n  * SMTP writer\n  * Others... (See [Wiki](https://github.com/cihub/seelog/wiki))\n* Log message wrappers (JSON, XML, etc.)\n* Global variables and functions for easy usage in standalone apps\n* Functions for flexible usage in libraries\n\nQuick-start\n-----------\n\n```go\npackage main\n\nimport log \"github.com/cihub/seelog\"\n\nfunc main() {\n    defer log.Flush()\n    log.Info(\"Hello from Seelog!\")\n}\n```\n\nInstallation\n------------\n\nIf you don't have the Go development environment installed, visit the \n[Getting Started](http://golang.org/doc/install.html) document and follow the instructions. 
Once you're ready, execute the following command:\n\n```\ngo get -u github.com/cihub/seelog\n```\n\n*IMPORTANT*: If you are not using the latest release version of Go, check out this [wiki page](https://github.com/cihub/seelog/wiki/Notes-on-'go-get')\n\nDocumentation\n---------------\n\nSeelog has github wiki pages, which contain detailed how-tos references: https://github.com/cihub/seelog/wiki\n\nExamples\n---------------\n\nSeelog examples can be found here: [seelog-examples](https://github.com/cihub/seelog-examples)\n\nIssues\n---------------\n\nFeel free to push issues that could make Seelog better: https://github.com/cihub/seelog/issues\n\nChangelog\n---------------\n\n* **v2.5** : Interaction with other systems. Part 2: custom receivers\n    * Finished custom receivers feature. Check [wiki](https://github.com/cihub/seelog/wiki/custom-receivers)\n    * Added 'LoggerFromCustomReceiver'\n    * Added 'LoggerFromWriterWithMinLevelAndFormat'\n    * Added 'LoggerFromCustomReceiver'\n    * Added 'LoggerFromParamConfigAs...' \n* **v2.4** : Interaction with other systems. Part 1: wrapping seelog\n    * Added configurable caller stack skip logic\n    * Added 'SetAdditionalStackDepth' to 'LoggerInterface'\n* **v2.3** : Rethinking 'rolling' receiver\n    * Reimplemented 'rolling' receiver\n    * Added 'Max rolls' feature for 'rolling' receiver with type='date'\n    * Fixed 'rolling' receiver issue: renaming on Windows\n* **v2.2** : go1.0 compatibility point [go1.0 tag]\n    * Fixed internal bugs\n    * Added 'ANSI n [;k]' format identifier:  %EscN\n    * Made current release go1 compatible \n* **v2.1** : Some new features\n    * Rolling receiver archiving option.\n    * Added format identifier: %Line\n    * Smtp: added paths to PEM files directories\n    * Added format identifier: %FuncShort\n    * Warn, Error and Critical methods now return an error\n* **v2.0** : Second major release. 
BREAKING CHANGES.\n    * Support of binaries with stripped symbols\n    * Added log strategy: adaptive\n    * Critical message now forces Flush()\n    * Added predefined formats: xml-debug, xml-debug-short, xml, xml-short, json-debug, json-debug-short, json, json-short, debug, debug-short, fast\n    * Added receiver: conn (network connection writer)\n    * BREAKING CHANGE: added Tracef, Debugf, Infof, etc. to satisfy the print/printf principle\n    * Bug fixes\n* **v1.0** : Initial release. Features:\n    * Xml config\n    * Changing configurations on the fly without app restart\n    * Contraints and exceptions\n    * Formatting\n    * Log strategies: sync, async loop, async timer\n    * Receivers: buffered, console, file, rolling, smtp\n\n\n\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/behavior_adaptive_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"strconv\"\n\t\"testing\"\n)\n\nfunc countSequencedRowsInFile(filePath string) (int64, error) {\n\tbts, err := ioutil.ReadFile(filePath)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tbufReader := bufio.NewReader(bytes.NewBuffer(bts))\n\n\tvar gotCounter int64\n\tfor {\n\t\tline, _, bufErr := bufReader.ReadLine()\n\t\tif bufErr != nil && bufErr != io.EOF {\n\t\t\treturn 0, bufErr\n\t\t}\n\n\t\tlineString := string(line)\n\t\tif lineString == \"\" 
{\n\t\t\tbreak\n\t\t}\n\n\t\tintVal, atoiErr := strconv.ParseInt(lineString, 10, 64)\n\t\tif atoiErr != nil {\n\t\t\treturn 0, atoiErr\n\t\t}\n\n\t\tif intVal != gotCounter {\n\t\t\treturn 0, fmt.Errorf(\"wrong order: %d Expected: %d\\n\", intVal, gotCounter)\n\t\t}\n\n\t\tgotCounter++\n\t}\n\n\treturn gotCounter, nil\n}\n\nfunc Test_Adaptive(t *testing.T) {\n\tfileName := \"beh_test_adaptive.log\"\n\tcount := 100\n\n\tCurrent.Close()\n\n\tif e := tryRemoveFile(fileName); e != nil {\n\t\tt.Error(e)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif e := tryRemoveFile(fileName); e != nil {\n\t\t\tt.Error(e)\n\t\t}\n\t}()\n\n\ttestConfig := `\n<seelog type=\"adaptive\" mininterval=\"1000\" maxinterval=\"1000000\" critmsgcount=\"100\">\n\t<outputs formatid=\"msg\">\n\t\t<file path=\"` + fileName + `\"/>\n\t</outputs>\n\t<formats>\n\t\t<format id=\"msg\" format=\"%Msg%n\"/>\n\t</formats>\n</seelog>`\n\n\tlogger, _ := LoggerFromConfigAsString(testConfig)\n\n\terr := ReplaceLogger(logger)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tfor i := 0; i < count; i++ {\n\t\tTrace(strconv.Itoa(i))\n\t}\n\n\tFlush()\n\n\tgotCount, err := countSequencedRowsInFile(fileName)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tif int64(count) != gotCount {\n\t\tt.Errorf(\"wrong count of log messages. Expected: %v, got: %v.\", count, gotCount)\n\t\treturn\n\t}\n\n\tCurrent.Close()\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/behavior_adaptivelogger.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"time\"\n)\n\nvar (\n\tadaptiveLoggerMaxInterval         = time.Minute\n\tadaptiveLoggerMaxCriticalMsgCount = uint32(1000)\n)\n\n// asyncAdaptiveLogger represents asynchronous adaptive logger which acts like\n// an async timer logger, but its interval depends on the current message count\n// in the queue.\n//\n// Interval = I, minInterval = m, maxInterval = M, criticalMsgCount = C, msgCount = c:\n// I = m + (C - Min(c, C)) / C * (M - m)\ntype asyncAdaptiveLogger struct {\n\tasyncLogger\n\tminInterval    
  time.Duration\n\tcriticalMsgCount uint32\n\tmaxInterval      time.Duration\n}\n\n// newAsyncLoopLogger creates a new asynchronous adaptive logger\nfunc newAsyncAdaptiveLogger(\n\tconfig *logConfig,\n\tminInterval time.Duration,\n\tmaxInterval time.Duration,\n\tcriticalMsgCount uint32) (*asyncAdaptiveLogger, error) {\n\n\tif minInterval <= 0 {\n\t\treturn nil, errors.New(\"async adaptive logger min interval should be > 0\")\n\t}\n\n\tif maxInterval > adaptiveLoggerMaxInterval {\n\t\treturn nil, fmt.Errorf(\"async adaptive logger max interval should be <= %s\",\n\t\t\tadaptiveLoggerMaxInterval)\n\t}\n\n\tif criticalMsgCount <= 0 {\n\t\treturn nil, errors.New(\"async adaptive logger critical msg count should be > 0\")\n\t}\n\n\tif criticalMsgCount > adaptiveLoggerMaxCriticalMsgCount {\n\t\treturn nil, fmt.Errorf(\"async adaptive logger critical msg count should be <= %s\",\n\t\t\tadaptiveLoggerMaxInterval)\n\t}\n\n\tasnAdaptiveLogger := new(asyncAdaptiveLogger)\n\n\tasnAdaptiveLogger.asyncLogger = *newAsyncLogger(config)\n\tasnAdaptiveLogger.minInterval = minInterval\n\tasnAdaptiveLogger.maxInterval = maxInterval\n\tasnAdaptiveLogger.criticalMsgCount = criticalMsgCount\n\n\tgo asnAdaptiveLogger.processQueue()\n\n\treturn asnAdaptiveLogger, nil\n}\n\nfunc (asnAdaptiveLogger *asyncAdaptiveLogger) processItem() (closed bool, itemCount int) {\n\tasnAdaptiveLogger.queueHasElements.L.Lock()\n\tdefer asnAdaptiveLogger.queueHasElements.L.Unlock()\n\n\tfor asnAdaptiveLogger.msgQueue.Len() == 0 && !asnAdaptiveLogger.closed {\n\t\tasnAdaptiveLogger.queueHasElements.Wait()\n\t}\n\n\tif asnAdaptiveLogger.closed {\n\t\treturn true, asnAdaptiveLogger.msgQueue.Len()\n\t}\n\n\tasnAdaptiveLogger.processQueueElement()\n\treturn false, asnAdaptiveLogger.msgQueue.Len() - 1\n}\n\n// I = m + (C - Min(c, C)) / C * (M - m) =>\n// I = m + cDiff * mDiff,\n// \t\tcDiff = (C - Min(c, C)) / C)\n//\t\tmDiff = (M - m)\nfunc (asnAdaptiveLogger *asyncAdaptiveLogger) calcAdaptiveInterval(msgCount 
int) time.Duration {\n\tcritCountF := float64(asnAdaptiveLogger.criticalMsgCount)\n\tcDiff := (critCountF - math.Min(float64(msgCount), critCountF)) / critCountF\n\tmDiff := float64(asnAdaptiveLogger.maxInterval - asnAdaptiveLogger.minInterval)\n\n\treturn asnAdaptiveLogger.minInterval + time.Duration(cDiff*mDiff)\n}\n\nfunc (asnAdaptiveLogger *asyncAdaptiveLogger) processQueue() {\n\tfor !asnAdaptiveLogger.closed {\n\t\tclosed, itemCount := asnAdaptiveLogger.processItem()\n\n\t\tif closed {\n\t\t\tbreak\n\t\t}\n\n\t\tinterval := asnAdaptiveLogger.calcAdaptiveInterval(itemCount)\n\n\t\t<-time.After(interval)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/behavior_asynclogger.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"container/list\"\n\t\"fmt\"\n\t\"sync\"\n)\n\n// MaxQueueSize is the critical number of messages in the queue that result in an immediate flush.\nconst (\n\tMaxQueueSize = 10000\n)\n\ntype msgQueueItem struct {\n\tlevel   LogLevel\n\tcontext LogContextInterface\n\tmessage fmt.Stringer\n}\n\n// asyncLogger represents common data for all asynchronous loggers\ntype asyncLogger struct {\n\tcommonLogger\n\tmsgQueue         *list.List\n\tqueueHasElements *sync.Cond\n}\n\n// newAsyncLogger creates a new asynchronous logger\nfunc 
newAsyncLogger(config *logConfig) *asyncLogger {\n\tasnLogger := new(asyncLogger)\n\n\tasnLogger.msgQueue = list.New()\n\tasnLogger.queueHasElements = sync.NewCond(new(sync.Mutex))\n\n\tasnLogger.commonLogger = *newCommonLogger(config, asnLogger)\n\n\treturn asnLogger\n}\n\nfunc (asnLogger *asyncLogger) innerLog(\n\tlevel LogLevel,\n\tcontext LogContextInterface,\n\tmessage fmt.Stringer) {\n\n\tasnLogger.addMsgToQueue(level, context, message)\n}\n\nfunc (asnLogger *asyncLogger) Close() {\n\tasnLogger.m.Lock()\n\tdefer asnLogger.m.Unlock()\n\n\tif !asnLogger.closed {\n\t\tasnLogger.flushQueue(true)\n\t\tasnLogger.config.RootDispatcher.Flush()\n\n\t\tif err := asnLogger.config.RootDispatcher.Close(); err != nil {\n\t\t\treportInternalError(err)\n\t\t}\n\n\t\tasnLogger.queueHasElements.Broadcast()\n\t}\n}\n\nfunc (asnLogger *asyncLogger) Flush() {\n\tasnLogger.m.Lock()\n\tdefer asnLogger.m.Unlock()\n\n\tif !asnLogger.closed {\n\t\tasnLogger.flushQueue(true)\n\t\tasnLogger.config.RootDispatcher.Flush()\n\t}\n}\n\nfunc (asnLogger *asyncLogger) flushQueue(lockNeeded bool) {\n\tif lockNeeded {\n\t\tasnLogger.queueHasElements.L.Lock()\n\t\tdefer asnLogger.queueHasElements.L.Unlock()\n\t}\n\n\tfor asnLogger.msgQueue.Len() > 0 {\n\t\tasnLogger.processQueueElement()\n\t}\n}\n\nfunc (asnLogger *asyncLogger) processQueueElement() {\n\tif asnLogger.msgQueue.Len() > 0 {\n\t\tbackElement := asnLogger.msgQueue.Front()\n\t\tmsg, _ := backElement.Value.(msgQueueItem)\n\t\tasnLogger.processLogMsg(msg.level, msg.message, msg.context)\n\t\tasnLogger.msgQueue.Remove(backElement)\n\t}\n}\n\nfunc (asnLogger *asyncLogger) addMsgToQueue(\n\tlevel LogLevel,\n\tcontext LogContextInterface,\n\tmessage fmt.Stringer) {\n\n\tif !asnLogger.closed {\n\t\tasnLogger.queueHasElements.L.Lock()\n\t\tdefer asnLogger.queueHasElements.L.Unlock()\n\n\t\tif asnLogger.msgQueue.Len() >= MaxQueueSize {\n\t\t\tfmt.Printf(\"Seelog queue overflow: more than %v messages in the queue. 
Flushing.\\n\", MaxQueueSize)\n\t\t\tasnLogger.flushQueue(false)\n\t\t}\n\n\t\tqueueItem := msgQueueItem{level, context, message}\n\n\t\tasnLogger.msgQueue.PushBack(queueItem)\n\t\tasnLogger.queueHasElements.Broadcast()\n\t} else {\n\t\terr := fmt.Errorf(\"queue closed! Cannot process element: %d %#v\", level, message)\n\t\treportInternalError(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/behavior_asyncloop_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"strconv\"\n\t\"testing\"\n)\n\nfunc Test_Asyncloop(t *testing.T) {\n\tfileName := \"beh_test_asyncloop.log\"\n\tcount := 100\n\n\tCurrent.Close()\n\n\tif e := tryRemoveFile(fileName); e != nil {\n\t\tt.Error(e)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif e := tryRemoveFile(fileName); e != nil {\n\t\t\tt.Error(e)\n\t\t}\n\t}()\n\n\ttestConfig := `\n<seelog type=\"asyncloop\">\n\t<outputs formatid=\"msg\">\n\t\t<file path=\"` + fileName + `\"/>\n\t</outputs>\n\t<formats>\n\t\t<format id=\"msg\" 
format=\"%Msg%n\"/>\n\t</formats>\n</seelog>`\n\n\tlogger, _ := LoggerFromConfigAsString(testConfig)\n\terr := ReplaceLogger(logger)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tfor i := 0; i < count; i++ {\n\t\tTrace(strconv.Itoa(i))\n\t}\n\n\tFlush()\n\n\tgotCount, err := countSequencedRowsInFile(fileName)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tif int64(count) != gotCount {\n\t\tt.Errorf(\"wrong count of log messages. Expected: %v, got: %v.\", count, gotCount)\n\t\treturn\n\t}\n\n\tCurrent.Close()\n}\n\nfunc Test_AsyncloopOff(t *testing.T) {\n\tfileName := \"beh_test_asyncloopoff.log\"\n\tcount := 100\n\n\tCurrent.Close()\n\n\tif e := tryRemoveFile(fileName); e != nil {\n\t\tt.Error(e)\n\t\treturn\n\t}\n\n\ttestConfig := `\n<seelog type=\"asyncloop\" levels=\"off\">\n\t<outputs formatid=\"msg\">\n\t\t<file path=\"` + fileName + `\"/>\n\t</outputs>\n\t<formats>\n\t\t<format id=\"msg\" format=\"%Msg%n\"/>\n\t</formats>\n</seelog>`\n\n\tlogger, _ := LoggerFromConfigAsString(testConfig)\n\terr := ReplaceLogger(logger)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tfor i := 0; i < count; i++ {\n\t\tTrace(strconv.Itoa(i))\n\t}\n\n\tFlush()\n\n\tex, err := fileExists(fileName)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif ex {\n\t\tt.Errorf(\"logger at level OFF is not expected to create log file at all.\")\n\t\tdefer func() {\n\t\t\tif e := tryRemoveFile(fileName); e != nil {\n\t\t\t\tt.Error(e)\n\t\t\t}\n\t\t}()\n\t}\n\n\tCurrent.Close()\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/behavior_asynclooplogger.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\n// asyncLoopLogger represents asynchronous logger which processes the log queue in\n// a 'for' loop\ntype asyncLoopLogger struct {\n\tasyncLogger\n}\n\n// newAsyncLoopLogger creates a new asynchronous loop logger\nfunc newAsyncLoopLogger(config *logConfig) *asyncLoopLogger {\n\n\tasnLoopLogger := new(asyncLoopLogger)\n\n\tasnLoopLogger.asyncLogger = *newAsyncLogger(config)\n\n\tgo asnLoopLogger.processQueue()\n\n\treturn asnLoopLogger\n}\n\nfunc (asnLoopLogger *asyncLoopLogger) processItem() (closed bool) 
{\n\tasnLoopLogger.queueHasElements.L.Lock()\n\tdefer asnLoopLogger.queueHasElements.L.Unlock()\n\n\tfor asnLoopLogger.msgQueue.Len() == 0 && !asnLoopLogger.closed {\n\t\tasnLoopLogger.queueHasElements.Wait()\n\t}\n\n\tif asnLoopLogger.closed {\n\t\treturn true\n\t}\n\n\tasnLoopLogger.processQueueElement()\n\treturn false\n}\n\nfunc (asnLoopLogger *asyncLoopLogger) processQueue() {\n\tfor !asnLoopLogger.closed {\n\t\tclosed := asnLoopLogger.processItem()\n\n\t\tif closed {\n\t\t\tbreak\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/behavior_asynctimer_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"strconv\"\n\t\"testing\"\n)\n\nfunc Test_Asynctimer(t *testing.T) {\n\tfileName := \"beh_test_asynctimer.log\"\n\tcount := 100\n\n\tCurrent.Close()\n\n\tif e := tryRemoveFile(fileName); e != nil {\n\t\tt.Error(e)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif e := tryRemoveFile(fileName); e != nil {\n\t\t\tt.Error(e)\n\t\t}\n\t}()\n\n\ttestConfig := `\n<seelog type=\"asynctimer\" asyncinterval=\"100\">\n\t<outputs formatid=\"msg\">\n\t\t<file path=\"` + fileName + `\"/>\n\t</outputs>\n\t<formats>\n\t\t<format id=\"msg\" 
format=\"%Msg%n\"/>\n\t</formats>\n</seelog>`\n\n\tlogger, _ := LoggerFromConfigAsString(testConfig)\n\terr := ReplaceLogger(logger)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tfor i := 0; i < count; i++ {\n\t\tTrace(strconv.Itoa(i))\n\t}\n\n\tFlush()\n\n\tgotCount, err := countSequencedRowsInFile(fileName)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tif int64(count) != gotCount {\n\t\tt.Errorf(\"wrong count of log messages. Expected: %v, got: %v.\", count, gotCount)\n\t\treturn\n\t}\n\n\tCurrent.Close()\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/behavior_asynctimerlogger.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"errors\"\n\t\"time\"\n)\n\n// asyncTimerLogger represents asynchronous logger which processes the log queue each\n// 'duration' nanoseconds\ntype asyncTimerLogger struct {\n\tasyncLogger\n\tinterval time.Duration\n}\n\n// newAsyncLoopLogger creates a new asynchronous loop logger\nfunc newAsyncTimerLogger(config *logConfig, interval time.Duration) (*asyncTimerLogger, error) {\n\n\tif interval <= 0 {\n\t\treturn nil, errors.New(\"async logger interval should be > 0\")\n\t}\n\n\tasnTimerLogger := 
new(asyncTimerLogger)\n\n\tasnTimerLogger.asyncLogger = *newAsyncLogger(config)\n\tasnTimerLogger.interval = interval\n\n\tgo asnTimerLogger.processQueue()\n\n\treturn asnTimerLogger, nil\n}\n\nfunc (asnTimerLogger *asyncTimerLogger) processItem() (closed bool) {\n\tasnTimerLogger.queueHasElements.L.Lock()\n\tdefer asnTimerLogger.queueHasElements.L.Unlock()\n\n\tfor asnTimerLogger.msgQueue.Len() == 0 && !asnTimerLogger.closed {\n\t\tasnTimerLogger.queueHasElements.Wait()\n\t}\n\n\tif asnTimerLogger.closed {\n\t\treturn true\n\t}\n\n\tasnTimerLogger.processQueueElement()\n\treturn false\n}\n\nfunc (asnTimerLogger *asyncTimerLogger) processQueue() {\n\tfor !asnTimerLogger.closed {\n\t\tclosed := asnTimerLogger.processItem()\n\n\t\tif closed {\n\t\t\tbreak\n\t\t}\n\n\t\t<-time.After(asnTimerLogger.interval)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/behavior_synclogger.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"fmt\"\n)\n\n// syncLogger performs logging in the same goroutine where 'Trace/Debug/...'\n// func was called\ntype syncLogger struct {\n\tcommonLogger\n}\n\n// newSyncLogger creates a new synchronous logger\nfunc newSyncLogger(config *logConfig) *syncLogger {\n\tsyncLogger := new(syncLogger)\n\n\tsyncLogger.commonLogger = *newCommonLogger(config, syncLogger)\n\n\treturn syncLogger\n}\n\nfunc (syncLogger *syncLogger) innerLog(\n\tlevel LogLevel,\n\tcontext LogContextInterface,\n\tmessage fmt.Stringer) {\n\n\tsyncLogger.processLogMsg(level, 
message, context)\n}\n\nfunc (syncLogger *syncLogger) Close() {\n\tsyncLogger.m.Lock()\n\tdefer syncLogger.m.Unlock()\n\n\tif !syncLogger.closed {\n\t\tif err := syncLogger.config.RootDispatcher.Close(); err != nil {\n\t\t\treportInternalError(err)\n\t\t}\n\t}\n}\n\nfunc (syncLogger *syncLogger) Flush() {\n\tsyncLogger.m.Lock()\n\tdefer syncLogger.m.Unlock()\n\n\tif !syncLogger.closed {\n\t\tsyncLogger.config.RootDispatcher.Flush()\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/behavior_synclogger_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"strconv\"\n\t\"testing\"\n)\n\nfunc Test_Sync(t *testing.T) {\n\tfileName := \"beh_test_sync.log\"\n\tcount := 100\n\n\tCurrent.Close()\n\n\tif e := tryRemoveFile(fileName); e != nil {\n\t\tt.Error(e)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif e := tryRemoveFile(fileName); e != nil {\n\t\t\tt.Error(e)\n\t\t}\n\t}()\n\n\ttestConfig := `\n<seelog type=\"sync\">\n\t<outputs formatid=\"msg\">\n\t\t<file path=\"` + fileName + `\"/>\n\t</outputs>\n\t<formats>\n\t\t<format id=\"msg\" format=\"%Msg%n\"/>\n\t</formats>\n</seelog>`\n\n\tlogger, _ 
:= LoggerFromConfigAsString(testConfig)\n\terr := ReplaceLogger(logger)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tfor i := 0; i < count; i++ {\n\t\tTrace(strconv.Itoa(i))\n\t}\n\n\tgotCount, err := countSequencedRowsInFile(fileName)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tif int64(count) != gotCount {\n\t\tt.Errorf(\"wrong count of log messages. Expected: %v, got: %v.\", count, gotCount)\n\t\treturn\n\t}\n\n\tCurrent.Close()\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/cfg_config.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"bytes\"\n\t\"encoding/xml\"\n\t\"io\"\n\t\"os\"\n)\n\n// LoggerFromConfigAsFile creates logger with config from file. File should contain valid seelog xml.\nfunc LoggerFromConfigAsFile(fileName string) (LoggerInterface, error) {\n\tfile, err := os.Open(fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\tconf, err := configFromReader(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn createLoggerFromConfig(conf)\n}\n\n// LoggerFromConfigAsBytes creates a logger with config from bytes stream. 
Bytes should contain valid seelog xml.\nfunc LoggerFromConfigAsBytes(data []byte) (LoggerInterface, error) {\n\tconf, err := configFromReader(bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn createLoggerFromConfig(conf)\n}\n\n// LoggerFromConfigAsString creates a logger with config from a string. String should contain valid seelog xml.\nfunc LoggerFromConfigAsString(data string) (LoggerInterface, error) {\n\treturn LoggerFromConfigAsBytes([]byte(data))\n}\n\n// LoggerFromParamConfigAsFile does the same as LoggerFromConfigAsFile, but includes special parser options.\n// See 'CfgParseParams' comments.\nfunc LoggerFromParamConfigAsFile(fileName string, parserParams *CfgParseParams) (LoggerInterface, error) {\n\tfile, err := os.Open(fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\tconf, err := configFromReaderWithConfig(file, parserParams)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn createLoggerFromConfig(conf)\n}\n\n// LoggerFromParamConfigAsBytes does the same as LoggerFromConfigAsBytes, but includes special parser options.\n// See 'CfgParseParams' comments.\nfunc LoggerFromParamConfigAsBytes(data []byte, parserParams *CfgParseParams) (LoggerInterface, error) {\n\tconf, err := configFromReaderWithConfig(bytes.NewBuffer(data), parserParams)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn createLoggerFromConfig(conf)\n}\n\n// LoggerFromParamConfigAsString does the same as LoggerFromConfigAsString, but includes special parser options.\n// See 'CfgParseParams' comments.\nfunc LoggerFromParamConfigAsString(data string, parserParams *CfgParseParams) (LoggerInterface, error) {\n\treturn LoggerFromParamConfigAsBytes([]byte(data), parserParams)\n}\n\n// LoggerFromWriterWithMinLevel is shortcut for LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat)\nfunc LoggerFromWriterWithMinLevel(output io.Writer, minLevel LogLevel) (LoggerInterface, error) {\n\treturn 
LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat)\n}\n\n// LoggerFromWriterWithMinLevelAndFormat creates a proxy logger that uses io.Writer as the\n// receiver with minimal level = minLevel and with specified format.\n//\n// All messages with level more or equal to minLevel will be written to output and\n// formatted using the default seelog format.\n//\n// Can be called for usage with non-Seelog systems\nfunc LoggerFromWriterWithMinLevelAndFormat(output io.Writer, minLevel LogLevel, format string) (LoggerInterface, error) {\n\tconstraints, err := newMinMaxConstraints(minLevel, CriticalLvl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tformatter, err := newFormatter(format)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdispatcher, err := newSplitDispatcher(formatter, []interface{}{output})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconf, err := newConfig(constraints, make([]*logLevelException, 0), dispatcher, syncloggerTypeFromString, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn createLoggerFromConfig(conf)\n}\n\n// LoggerFromXMLDecoder creates logger with config from a XML decoder starting from a specific node.\n// It should contain valid seelog xml, except for root node name.\nfunc LoggerFromXMLDecoder(xmlParser *xml.Decoder, rootNode xml.Token) (LoggerInterface, error) {\n\tconf, err := configFromXMLDecoder(xmlParser, rootNode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn createLoggerFromConfig(conf)\n}\n\n// LoggerFromCustomReceiver creates a proxy logger that uses a CustomReceiver as the\n// receiver.\n//\n// All messages will be sent to the specified custom receiver without additional\n// formatting ('%Msg' format is used).\n//\n// Check CustomReceiver, RegisterReceiver for additional info.\n//\n// NOTE 1: CustomReceiver.AfterParse is only called when a receiver is instantiated\n// by the config parser while parsing config. 
So, if you are not planning to use the\n// same CustomReceiver for both proxying (via LoggerFromCustomReceiver call) and\n// loading from config, just leave AfterParse implementation empty.\n//\n// NOTE 2: Unlike RegisterReceiver, LoggerFromCustomReceiver takes an already initialized\n// instance that implements CustomReceiver. So, fill it with data and perform any initialization\n// logic before calling this func and it won't be lost.\n//\n// So:\n// * RegisterReceiver takes value just to get the reflect.Type from it and then\n// instantiate it as many times as config is reloaded.\n//\n// * LoggerFromCustomReceiver takes value and uses it without modification and\n// reinstantiation, directy passing it to the dispatcher tree.\nfunc LoggerFromCustomReceiver(receiver CustomReceiver) (LoggerInterface, error) {\n\tconstraints, err := newMinMaxConstraints(TraceLvl, CriticalLvl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toutput, err := newCustomReceiverDispatcherByValue(msgonlyformatter, receiver, \"user-proxy\", CustomReceiverInitArgs{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdispatcher, err := newSplitDispatcher(msgonlyformatter, []interface{}{output})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconf, err := newConfig(constraints, make([]*logLevelException, 0), dispatcher, syncloggerTypeFromString, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn createLoggerFromConfig(conf)\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/cfg_errors.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"errors\"\n)\n\nvar (\n\terrNodeMustHaveChildren   = errors.New(\"node must have children\")\n\terrNodeCannotHaveChildren = errors.New(\"node cannot have children\")\n)\n\ntype unexpectedChildElementError struct {\n\tbaseError\n}\n\nfunc newUnexpectedChildElementError(msg string) *unexpectedChildElementError {\n\tcustmsg := \"Unexpected child element: \" + msg\n\treturn &unexpectedChildElementError{baseError{message: custmsg}}\n}\n\ntype missingArgumentError struct {\n\tbaseError\n}\n\nfunc newMissingArgumentError(nodeName, attrName 
string) *missingArgumentError {\n\tcustmsg := \"Output '\" + nodeName + \"' has no '\" + attrName + \"' attribute\"\n\treturn &missingArgumentError{baseError{message: custmsg}}\n}\n\ntype unexpectedAttributeError struct {\n\tbaseError\n}\n\nfunc newUnexpectedAttributeError(nodeName, attr string) *unexpectedAttributeError {\n\tcustmsg := nodeName + \" has unexpected attribute: \" + attr\n\treturn &unexpectedAttributeError{baseError{message: custmsg}}\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/cfg_logconfig.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"errors\"\n)\n\ntype loggerTypeFromString uint8\n\nconst (\n\tsyncloggerTypeFromString = iota\n\tasyncLooploggerTypeFromString\n\tasyncTimerloggerTypeFromString\n\tadaptiveLoggerTypeFromString\n\tdefaultloggerTypeFromString = asyncLooploggerTypeFromString\n)\n\nconst (\n\tsyncloggerTypeFromStringStr       = \"sync\"\n\tasyncloggerTypeFromStringStr      = \"asyncloop\"\n\tasyncTimerloggerTypeFromStringStr = \"asynctimer\"\n\tadaptiveLoggerTypeFromStringStr   = \"adaptive\"\n)\n\n// asyncTimerLoggerData represents specific data for async 
timer logger\ntype asyncTimerLoggerData struct {\n\tAsyncInterval uint32\n}\n\n// adaptiveLoggerData represents specific data for adaptive timer logger\ntype adaptiveLoggerData struct {\n\tMinInterval      uint32\n\tMaxInterval      uint32\n\tCriticalMsgCount uint32\n}\n\nvar loggerTypeToStringRepresentations = map[loggerTypeFromString]string{\n\tsyncloggerTypeFromString:       syncloggerTypeFromStringStr,\n\tasyncLooploggerTypeFromString:  asyncloggerTypeFromStringStr,\n\tasyncTimerloggerTypeFromString: asyncTimerloggerTypeFromStringStr,\n\tadaptiveLoggerTypeFromString:   adaptiveLoggerTypeFromStringStr,\n}\n\n// getLoggerTypeFromString parses a string and returns a corresponding logger type, if successful.\nfunc getLoggerTypeFromString(logTypeString string) (level loggerTypeFromString, found bool) {\n\tfor logType, logTypeStr := range loggerTypeToStringRepresentations {\n\t\tif logTypeStr == logTypeString {\n\t\t\treturn logType, true\n\t\t}\n\t}\n\n\treturn 0, false\n}\n\n// logConfig stores logging configuration. 
Contains messages dispatcher, allowed log level rules\n// (general constraints and exceptions), and messages formats (used by nodes of dispatcher tree)\ntype logConfig struct {\n\tConstraints    logLevelConstraints  // General log level rules (>min and <max, or set of allowed levels)\n\tExceptions     []*logLevelException // Exceptions to general rules for specific files or funcs\n\tRootDispatcher dispatcherInterface  // Root of output tree\n\tLogType        loggerTypeFromString\n\tLoggerData     interface{}\n\tParams         *CfgParseParams // Check cfg_parser: CfgParseParams\n}\n\nfunc newConfig(\n\tconstraints logLevelConstraints,\n\texceptions []*logLevelException,\n\trootDispatcher dispatcherInterface,\n\tlogType loggerTypeFromString,\n\tlogData interface{},\n\tcfgParams *CfgParseParams) (*logConfig, error) {\n\tif constraints == nil {\n\t\treturn nil, errors.New(\"constraints can not be nil\")\n\t}\n\tif rootDispatcher == nil {\n\t\treturn nil, errors.New(\"rootDispatcher can not be nil\")\n\t}\n\n\tconfig := new(logConfig)\n\tconfig.Constraints = constraints\n\tconfig.Exceptions = exceptions\n\tconfig.RootDispatcher = rootDispatcher\n\tconfig.LogType = logType\n\tconfig.LoggerData = logData\n\tconfig.Params = cfgParams\n\n\treturn config, nil\n}\n\n// IsAllowed returns true if logging with specified log level is allowed in current context.\n// If any of exception patterns match current context, then exception constraints are applied. Otherwise,\n// the general constraints are used.\nfunc (config *logConfig) IsAllowed(level LogLevel, context LogContextInterface) bool {\n\tallowed := config.Constraints.IsAllowed(level) // General rule\n\n\t// Exceptions:\n\tif context.IsValid() {\n\t\tfor _, exception := range config.Exceptions {\n\t\t\tif exception.MatchesContext(context) {\n\t\t\t\treturn exception.IsAllowed(level)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn allowed\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/cfg_logconfig_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestConfig(t *testing.T) {\n\ttestConfig :=\n\t\t`\n<seelog levels=\"trace, debug\">\n\t<exceptions>\n\t\t<exception funcpattern=\"*getFirst*\" filepattern=\"*\" minlevel=\"off\" />\n\t\t<exception funcpattern=\"*getSecond*\" filepattern=\"*\" levels=\"info, error\" />\n\t</exceptions>\n</seelog>\n`\n\n\tconf, err := configFromReader(strings.NewReader(testConfig))\n\tif err != nil {\n\t\tt.Errorf(\"parse error: %s\\n\", err.Error())\n\t\treturn\n\t}\n\n\tcontext, err := currentContext()\n\tif err != nil 
{\n\t\tt.Errorf(\"cannot get current context:\" + err.Error())\n\t\treturn\n\t}\n\tfirstContext, err := getFirstContext()\n\tif err != nil {\n\t\tt.Errorf(\"cannot get current context:\" + err.Error())\n\t\treturn\n\t}\n\tsecondContext, err := getSecondContext()\n\tif err != nil {\n\t\tt.Errorf(\"cannot get current context:\" + err.Error())\n\t\treturn\n\t}\n\n\tif !conf.IsAllowed(TraceLvl, context) {\n\t\tt.Errorf(\"error: deny trace in current context\")\n\t}\n\tif conf.IsAllowed(TraceLvl, firstContext) {\n\t\tt.Errorf(\"error: allow trace in first context\")\n\t}\n\tif conf.IsAllowed(ErrorLvl, context) {\n\t\tt.Errorf(\"error: allow error in current context\")\n\t}\n\tif !conf.IsAllowed(ErrorLvl, secondContext) {\n\t\tt.Errorf(\"error: deny error in second context\")\n\t}\n\n\t// cache test\n\tif !conf.IsAllowed(TraceLvl, context) {\n\t\tt.Errorf(\"error: deny trace in current context\")\n\t}\n\tif conf.IsAllowed(TraceLvl, firstContext) {\n\t\tt.Errorf(\"error: allow trace in first context\")\n\t}\n\tif conf.IsAllowed(ErrorLvl, context) {\n\t\tt.Errorf(\"error: allow error in current context\")\n\t}\n\tif !conf.IsAllowed(ErrorLvl, secondContext) {\n\t\tt.Errorf(\"error: deny error in second context\")\n\t}\n}\n\nfunc getFirstContext() (LogContextInterface, error) {\n\treturn currentContext()\n}\n\nfunc getSecondContext() (LogContextInterface, error) {\n\treturn currentContext()\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/cfg_parser.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"crypto/tls\"\n\t\"encoding/xml\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n// Names of elements of seelog config.\nconst (\n\tseelogConfigID                   = \"seelog\"\n\toutputsID                        = \"outputs\"\n\tformatsID                        = \"formats\"\n\tminLevelID                       = \"minlevel\"\n\tmaxLevelID                       = \"maxlevel\"\n\tlevelsID                         = \"levels\"\n\texceptionsID                     = \"exceptions\"\n\texceptionID              
        = \"exception\"\n\tfuncPatternID                    = \"funcpattern\"\n\tfilePatternID                    = \"filepattern\"\n\tformatID                         = \"format\"\n\tformatAttrID                     = \"format\"\n\tformatKeyAttrID                  = \"id\"\n\toutputFormatID                   = \"formatid\"\n\tpathID                           = \"path\"\n\tfileWriterID                     = \"file\"\n\tsmtpWriterID                     = \"smtp\"\n\tsenderaddressID                  = \"senderaddress\"\n\tsenderNameID                     = \"sendername\"\n\trecipientID                      = \"recipient\"\n\tmailHeaderID                     = \"header\"\n\tmailHeaderNameID                 = \"name\"\n\tmailHeaderValueID                = \"value\"\n\taddressID                        = \"address\"\n\thostNameID                       = \"hostname\"\n\thostPortID                       = \"hostport\"\n\tuserNameID                       = \"username\"\n\tuserPassID                       = \"password\"\n\tcACertDirpathID                  = \"cacertdirpath\"\n\tsubjectID                        = \"subject\"\n\tsplitterDispatcherID             = \"splitter\"\n\tconsoleWriterID                  = \"console\"\n\tcustomReceiverID                 = \"custom\"\n\tcustomNameAttrID                 = \"name\"\n\tcustomNameDataAttrPrefix         = \"data-\"\n\tfilterDispatcherID               = \"filter\"\n\tfilterLevelsAttrID               = \"levels\"\n\trollingfileWriterID              = \"rollingfile\"\n\trollingFileTypeAttr              = \"type\"\n\trollingFilePathAttr              = \"filename\"\n\trollingFileMaxSizeAttr           = \"maxsize\"\n\trollingFileMaxRollsAttr          = \"maxrolls\"\n\trollingFileNameModeAttr          = \"namemode\"\n\trollingFileDataPatternAttr       = \"datepattern\"\n\trollingFileArchiveAttr           = \"archivetype\"\n\trollingFileArchivePathAttr       = \"archivepath\"\n\tbufferedWriterID                 = 
\"buffered\"\n\tbufferedSizeAttr                 = \"size\"\n\tbufferedFlushPeriodAttr          = \"flushperiod\"\n\tloggerTypeFromStringAttr         = \"type\"\n\tasyncLoggerIntervalAttr          = \"asyncinterval\"\n\tadaptLoggerMinIntervalAttr       = \"mininterval\"\n\tadaptLoggerMaxIntervalAttr       = \"maxinterval\"\n\tadaptLoggerCriticalMsgCountAttr  = \"critmsgcount\"\n\tpredefinedPrefix                 = \"std:\"\n\tconnWriterID                     = \"conn\"\n\tconnWriterAddrAttr               = \"addr\"\n\tconnWriterNetAttr                = \"net\"\n\tconnWriterReconnectOnMsgAttr     = \"reconnectonmsg\"\n\tconnWriterUseTLSAttr             = \"tls\"\n\tconnWriterInsecureSkipVerifyAttr = \"insecureskipverify\"\n)\n\n// CustomReceiverProducer is the signature of the function CfgParseParams needs to create\n// custom receivers.\ntype CustomReceiverProducer func(CustomReceiverInitArgs) (CustomReceiver, error)\n\n// CfgParseParams represent specific parse options or flags used by parser. It is used if seelog parser needs\n// some special directives or additional info to correctly parse a config.\ntype CfgParseParams struct {\n\t// CustomReceiverProducers expose the same functionality as RegisterReceiver func\n\t// but only in the scope (context) of the config parse func instead of a global package scope.\n\t//\n\t// It means that if you use custom receivers in your code, you may either register them globally once with\n\t// RegisterReceiver or you may call funcs like LoggerFromParamConfigAsFile (with 'ParamConfig')\n\t// and use CustomReceiverProducers to provide custom producer funcs.\n\t//\n\t// A producer func is called when config parser processes a '<custom>' element. 
It takes the 'name' attribute\n\t// of the element and tries to find a match in two places:\n\t// 1) CfgParseParams.CustomReceiverProducers map\n\t// 2) Global type map, filled by RegisterReceiver\n\t//\n\t// If a match is found in the CustomReceiverProducers map, parser calls the corresponding producer func\n\t// passing the init args to it.\tThe func takes exactly the same args as CustomReceiver.AfterParse.\n\t// The producer func must return a correct receiver or an error. If case of error, seelog will behave\n\t// in the same way as with any other config error.\n\t//\n\t// You may use this param to set custom producers in case you need to pass some context when instantiating\n\t// a custom receiver or if you frequently change custom receivers with different parameters or in any other\n\t// situation where package-level registering (RegisterReceiver) is not an option for you.\n\tCustomReceiverProducers map[string]CustomReceiverProducer\n}\n\nfunc (cfg *CfgParseParams) String() string {\n\treturn fmt.Sprintf(\"CfgParams: {custom_recs=%d}\", len(cfg.CustomReceiverProducers))\n}\n\ntype elementMapEntry struct {\n\tconstructor func(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error)\n}\n\nvar elementMap map[string]elementMapEntry\nvar predefinedFormats map[string]*formatter\n\nfunc init() {\n\telementMap = map[string]elementMapEntry{\n\t\tfileWriterID:         {createfileWriter},\n\t\tsplitterDispatcherID: {createSplitter},\n\t\tcustomReceiverID:     {createCustomReceiver},\n\t\tfilterDispatcherID:   {createFilter},\n\t\tconsoleWriterID:      {createConsoleWriter},\n\t\trollingfileWriterID:  {createRollingFileWriter},\n\t\tbufferedWriterID:     {createbufferedWriter},\n\t\tsmtpWriterID:         {createSMTPWriter},\n\t\tconnWriterID:         {createconnWriter},\n\t}\n\n\terr := fillPredefinedFormats()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Seelog couldn't start: predefined formats creation failed. 
Error: %s\", err.Error()))\n\t}\n}\n\nfunc fillPredefinedFormats() error {\n\tpredefinedFormatsWithoutPrefix := map[string]string{\n\t\t\"xml-debug\":       `<time>%Ns</time><lev>%Lev</lev><msg>%Msg</msg><path>%RelFile</path><func>%Func</func><line>%Line</line>`,\n\t\t\"xml-debug-short\": `<t>%Ns</t><l>%l</l><m>%Msg</m><p>%RelFile</p><f>%Func</f>`,\n\t\t\"xml\":             `<time>%Ns</time><lev>%Lev</lev><msg>%Msg</msg>`,\n\t\t\"xml-short\":       `<t>%Ns</t><l>%l</l><m>%Msg</m>`,\n\n\t\t\"json-debug\":       `{\"time\":%Ns,\"lev\":\"%Lev\",\"msg\":\"%Msg\",\"path\":\"%RelFile\",\"func\":\"%Func\",\"line\":\"%Line\"}`,\n\t\t\"json-debug-short\": `{\"t\":%Ns,\"l\":\"%Lev\",\"m\":\"%Msg\",\"p\":\"%RelFile\",\"f\":\"%Func\"}`,\n\t\t\"json\":             `{\"time\":%Ns,\"lev\":\"%Lev\",\"msg\":\"%Msg\"}`,\n\t\t\"json-short\":       `{\"t\":%Ns,\"l\":\"%Lev\",\"m\":\"%Msg\"}`,\n\n\t\t\"debug\":       `[%LEVEL] %RelFile:%Func.%Line %Date %Time %Msg%n`,\n\t\t\"debug-short\": `[%LEVEL] %Date %Time %Msg%n`,\n\t\t\"fast\":        `%Ns %l %Msg%n`,\n\t}\n\n\tpredefinedFormats = make(map[string]*formatter)\n\n\tfor formatKey, format := range predefinedFormatsWithoutPrefix {\n\t\tformatter, err := newFormatter(format)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpredefinedFormats[predefinedPrefix+formatKey] = formatter\n\t}\n\n\treturn nil\n}\n\n// configFromXMLDecoder parses data from a given XML decoder.\n// Returns parsed config which can be used to create logger in case no errors occured.\n// Returns error if format is incorrect or anything happened.\nfunc configFromXMLDecoder(xmlParser *xml.Decoder, rootNode xml.Token) (*logConfig, error) {\n\treturn configFromXMLDecoderWithConfig(xmlParser, rootNode, nil)\n}\n\n// configFromXMLDecoderWithConfig parses data from a given XML decoder.\n// Returns parsed config which can be used to create logger in case no errors occured.\n// Returns error if format is incorrect or anything happened.\nfunc 
configFromXMLDecoderWithConfig(xmlParser *xml.Decoder, rootNode xml.Token, cfg *CfgParseParams) (*logConfig, error) {\n\t_, ok := rootNode.(xml.StartElement)\n\tif !ok {\n\t\treturn nil, errors.New(\"rootNode must be XML startElement\")\n\t}\n\n\tconfig, err := unmarshalNode(xmlParser, rootNode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif config == nil {\n\t\treturn nil, errors.New(\"xml has no content\")\n\t}\n\n\treturn configFromXMLNodeWithConfig(config, cfg)\n}\n\n// configFromReader parses data from a given reader.\n// Returns parsed config which can be used to create logger in case no errors occured.\n// Returns error if format is incorrect or anything happened.\nfunc configFromReader(reader io.Reader) (*logConfig, error) {\n\treturn configFromReaderWithConfig(reader, nil)\n}\n\n// configFromReaderWithConfig parses data from a given reader.\n// Returns parsed config which can be used to create logger in case no errors occured.\n// Returns error if format is incorrect or anything happened.\nfunc configFromReaderWithConfig(reader io.Reader, cfg *CfgParseParams) (*logConfig, error) {\n\tconfig, err := unmarshalConfig(reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.name != seelogConfigID {\n\t\treturn nil, errors.New(\"root xml tag must be '\" + seelogConfigID + \"'\")\n\t}\n\n\treturn configFromXMLNodeWithConfig(config, cfg)\n}\n\nfunc configFromXMLNodeWithConfig(config *xmlNode, cfg *CfgParseParams) (*logConfig, error) {\n\terr := checkUnexpectedAttribute(\n\t\tconfig,\n\t\tminLevelID,\n\t\tmaxLevelID,\n\t\tlevelsID,\n\t\tloggerTypeFromStringAttr,\n\t\tasyncLoggerIntervalAttr,\n\t\tadaptLoggerMinIntervalAttr,\n\t\tadaptLoggerMaxIntervalAttr,\n\t\tadaptLoggerCriticalMsgCountAttr,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = checkExpectedElements(config, optionalElement(outputsID), optionalElement(formatsID), optionalElement(exceptionsID))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconstraints, err := 
getConstraints(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\texceptions, err := getExceptions(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = checkDistinctExceptions(exceptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tformats, err := getFormats(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdispatcher, err := getOutputsTree(config, formats, cfg)\n\tif err != nil {\n\t\t// If we open several files, but then fail to parse the config, we should close\n\t\t// those files before reporting that config is invalid.\n\t\tif dispatcher != nil {\n\t\t\tdispatcher.Close()\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\tloggerType, logData, err := getloggerTypeFromStringData(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newConfig(constraints, exceptions, dispatcher, loggerType, logData, cfg)\n}\n\nfunc getConstraints(node *xmlNode) (logLevelConstraints, error) {\n\tminLevelStr, isMinLevel := node.attributes[minLevelID]\n\tmaxLevelStr, isMaxLevel := node.attributes[maxLevelID]\n\tlevelsStr, isLevels := node.attributes[levelsID]\n\n\tif isLevels && (isMinLevel && isMaxLevel) {\n\t\treturn nil, errors.New(\"for level declaration use '\" + levelsID + \"'' OR '\" + minLevelID +\n\t\t\t\"', '\" + maxLevelID + \"'\")\n\t}\n\n\toffString := LogLevel(Off).String()\n\n\tif (isLevels && strings.TrimSpace(levelsStr) == offString) ||\n\t\t(isMinLevel && !isMaxLevel && minLevelStr == offString) {\n\n\t\treturn newOffConstraints()\n\t}\n\n\tif isLevels {\n\t\tlevels, err := parseLevels(levelsStr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn newListConstraints(levels)\n\t}\n\n\tvar minLevel = LogLevel(TraceLvl)\n\tif isMinLevel {\n\t\tfound := true\n\t\tminLevel, found = LogLevelFromString(minLevelStr)\n\t\tif !found {\n\t\t\treturn nil, errors.New(\"declared \" + minLevelID + \" not found: \" + minLevelStr)\n\t\t}\n\t}\n\n\tvar maxLevel = LogLevel(CriticalLvl)\n\tif isMaxLevel {\n\t\tfound := true\n\t\tmaxLevel, found 
= LogLevelFromString(maxLevelStr)\n\t\tif !found {\n\t\t\treturn nil, errors.New(\"declared \" + maxLevelID + \" not found: \" + maxLevelStr)\n\t\t}\n\t}\n\n\treturn newMinMaxConstraints(minLevel, maxLevel)\n}\n\nfunc parseLevels(str string) ([]LogLevel, error) {\n\tlevelsStrArr := strings.Split(strings.Replace(str, \" \", \"\", -1), \",\")\n\tvar levels []LogLevel\n\tfor _, levelStr := range levelsStrArr {\n\t\tlevel, found := LogLevelFromString(levelStr)\n\t\tif !found {\n\t\t\treturn nil, errors.New(\"declared level not found: \" + levelStr)\n\t\t}\n\n\t\tlevels = append(levels, level)\n\t}\n\n\treturn levels, nil\n}\n\nfunc getExceptions(config *xmlNode) ([]*logLevelException, error) {\n\tvar exceptions []*logLevelException\n\n\tvar exceptionsNode *xmlNode\n\tfor _, child := range config.children {\n\t\tif child.name == exceptionsID {\n\t\t\texceptionsNode = child\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif exceptionsNode == nil {\n\t\treturn exceptions, nil\n\t}\n\n\terr := checkUnexpectedAttribute(exceptionsNode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = checkExpectedElements(exceptionsNode, multipleMandatoryElements(\"exception\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, exceptionNode := range exceptionsNode.children {\n\t\tif exceptionNode.name != exceptionID {\n\t\t\treturn nil, errors.New(\"incorrect nested element in exceptions section: \" + exceptionNode.name)\n\t\t}\n\n\t\terr := checkUnexpectedAttribute(exceptionNode, minLevelID, maxLevelID, levelsID, funcPatternID, filePatternID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tconstraints, err := getConstraints(exceptionNode)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"incorrect \" + exceptionsID + \" node: \" + err.Error())\n\t\t}\n\n\t\tfuncPattern, isFuncPattern := exceptionNode.attributes[funcPatternID]\n\t\tfilePattern, isFilePattern := exceptionNode.attributes[filePatternID]\n\t\tif !isFuncPattern {\n\t\t\tfuncPattern = \"*\"\n\t\t}\n\t\tif !isFilePattern 
{\n\t\t\tfilePattern = \"*\"\n\t\t}\n\n\t\texception, err := newLogLevelException(funcPattern, filePattern, constraints)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"incorrect exception node: \" + err.Error())\n\t\t}\n\n\t\texceptions = append(exceptions, exception)\n\t}\n\n\treturn exceptions, nil\n}\n\nfunc checkDistinctExceptions(exceptions []*logLevelException) error {\n\tfor i, exception := range exceptions {\n\t\tfor j, exception1 := range exceptions {\n\t\t\tif i == j {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif exception.FuncPattern() == exception1.FuncPattern() &&\n\t\t\t\texception.FilePattern() == exception1.FilePattern() {\n\n\t\t\t\treturn fmt.Errorf(\"there are two or more duplicate exceptions. Func: %v, file %v\",\n\t\t\t\t\texception.FuncPattern(), exception.FilePattern())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc getFormats(config *xmlNode) (map[string]*formatter, error) {\n\tformats := make(map[string]*formatter, 0)\n\n\tvar formatsNode *xmlNode\n\tfor _, child := range config.children {\n\t\tif child.name == formatsID {\n\t\t\tformatsNode = child\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif formatsNode == nil {\n\t\treturn formats, nil\n\t}\n\n\terr := checkUnexpectedAttribute(formatsNode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = checkExpectedElements(formatsNode, multipleMandatoryElements(\"format\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, formatNode := range formatsNode.children {\n\t\tif formatNode.name != formatID {\n\t\t\treturn nil, errors.New(\"incorrect nested element in \" + formatsID + \" section: \" + formatNode.name)\n\t\t}\n\n\t\terr := checkUnexpectedAttribute(formatNode, formatKeyAttrID, formatID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tid, isID := formatNode.attributes[formatKeyAttrID]\n\t\tformatStr, isFormat := formatNode.attributes[formatAttrID]\n\t\tif !isID {\n\t\t\treturn nil, errors.New(\"format has no '\" + formatKeyAttrID + \"' attribute\")\n\t\t}\n\t\tif !isFormat 
{\n\t\t\treturn nil, errors.New(\"format[\" + id + \"] has no '\" + formatAttrID + \"' attribute\")\n\t\t}\n\n\t\tformatter, err := newFormatter(formatStr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tformats[id] = formatter\n\t}\n\n\treturn formats, nil\n}\n\nfunc getloggerTypeFromStringData(config *xmlNode) (logType loggerTypeFromString, logData interface{}, err error) {\n\tlogTypeStr, loggerTypeExists := config.attributes[loggerTypeFromStringAttr]\n\n\tif !loggerTypeExists {\n\t\treturn defaultloggerTypeFromString, nil, nil\n\t}\n\n\tlogType, found := getLoggerTypeFromString(logTypeStr)\n\n\tif !found {\n\t\treturn 0, nil, fmt.Errorf(\"unknown logger type: %s\", logTypeStr)\n\t}\n\n\tif logType == asyncTimerloggerTypeFromString {\n\t\tintervalStr, intervalExists := config.attributes[asyncLoggerIntervalAttr]\n\t\tif !intervalExists {\n\t\t\treturn 0, nil, newMissingArgumentError(config.name, asyncLoggerIntervalAttr)\n\t\t}\n\n\t\tinterval, err := strconv.ParseUint(intervalStr, 10, 32)\n\t\tif err != nil {\n\t\t\treturn 0, nil, err\n\t\t}\n\n\t\tlogData = asyncTimerLoggerData{uint32(interval)}\n\t} else if logType == adaptiveLoggerTypeFromString {\n\n\t\t// Min interval\n\t\tminIntStr, minIntExists := config.attributes[adaptLoggerMinIntervalAttr]\n\t\tif !minIntExists {\n\t\t\treturn 0, nil, newMissingArgumentError(config.name, adaptLoggerMinIntervalAttr)\n\t\t}\n\t\tminInterval, err := strconv.ParseUint(minIntStr, 10, 32)\n\t\tif err != nil {\n\t\t\treturn 0, nil, err\n\t\t}\n\n\t\t// Max interval\n\t\tmaxIntStr, maxIntExists := config.attributes[adaptLoggerMaxIntervalAttr]\n\t\tif !maxIntExists {\n\t\t\treturn 0, nil, newMissingArgumentError(config.name, adaptLoggerMaxIntervalAttr)\n\t\t}\n\t\tmaxInterval, err := strconv.ParseUint(maxIntStr, 10, 32)\n\t\tif err != nil {\n\t\t\treturn 0, nil, err\n\t\t}\n\n\t\t// Critical msg count\n\t\tcriticalMsgCountStr, criticalMsgCountExists := config.attributes[adaptLoggerCriticalMsgCountAttr]\n\t\tif 
!criticalMsgCountExists {\n\t\t\treturn 0, nil, newMissingArgumentError(config.name, adaptLoggerCriticalMsgCountAttr)\n\t\t}\n\t\tcriticalMsgCount, err := strconv.ParseUint(criticalMsgCountStr, 10, 32)\n\t\tif err != nil {\n\t\t\treturn 0, nil, err\n\t\t}\n\n\t\tlogData = adaptiveLoggerData{uint32(minInterval), uint32(maxInterval), uint32(criticalMsgCount)}\n\t}\n\n\treturn logType, logData, nil\n}\n\nfunc getOutputsTree(config *xmlNode, formats map[string]*formatter, cfg *CfgParseParams) (dispatcherInterface, error) {\n\tvar outputsNode *xmlNode\n\tfor _, child := range config.children {\n\t\tif child.name == outputsID {\n\t\t\toutputsNode = child\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif outputsNode != nil {\n\t\terr := checkUnexpectedAttribute(outputsNode, outputFormatID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tformatter, err := getCurrentFormat(outputsNode, defaultformatter, formats)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\toutput, err := createSplitter(outputsNode, formatter, formats, cfg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdispatcher, ok := output.(dispatcherInterface)\n\t\tif ok {\n\t\t\treturn dispatcher, nil\n\t\t}\n\t}\n\n\tconsole, err := newConsoleWriter()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newSplitDispatcher(defaultformatter, []interface{}{console})\n}\n\nfunc getCurrentFormat(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter) (*formatter, error) {\n\tformatID, isFormatID := node.attributes[outputFormatID]\n\tif !isFormatID {\n\t\treturn formatFromParent, nil\n\t}\n\n\tformat, ok := formats[formatID]\n\tif ok {\n\t\treturn format, nil\n\t}\n\n\t// Test for predefined format match\n\tpdFormat, pdOk := predefinedFormats[formatID]\n\n\tif !pdOk {\n\t\treturn nil, errors.New(\"formatid = '\" + formatID + \"' doesn't exist\")\n\t}\n\n\treturn pdFormat, nil\n}\n\nfunc createInnerReceivers(node *xmlNode, format *formatter, formats map[string]*formatter, cfg 
*CfgParseParams) ([]interface{}, error) {\n\tvar outputs []interface{}\n\tfor _, childNode := range node.children {\n\t\tentry, ok := elementMap[childNode.name]\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"unnknown tag '\" + childNode.name + \"' in outputs section\")\n\t\t}\n\n\t\toutput, err := entry.constructor(childNode, format, formats, cfg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\toutputs = append(outputs, output)\n\t}\n\n\treturn outputs, nil\n}\n\nfunc createSplitter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {\n\terr := checkUnexpectedAttribute(node, outputFormatID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !node.hasChildren() {\n\t\treturn nil, errNodeMustHaveChildren\n\t}\n\n\tcurrentFormat, err := getCurrentFormat(node, formatFromParent, formats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treceivers, err := createInnerReceivers(node, currentFormat, formats, cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newSplitDispatcher(currentFormat, receivers)\n}\n\nfunc createCustomReceiver(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {\n\tdataCustomPrefixes := make(map[string]string)\n\t// Expecting only 'formatid', 'name' and 'data-' attrs\n\tfor attr, attrval := range node.attributes {\n\t\tisExpected := false\n\t\tif attr == outputFormatID ||\n\t\t\tattr == customNameAttrID {\n\t\t\tisExpected = true\n\t\t}\n\t\tif strings.HasPrefix(attr, customNameDataAttrPrefix) {\n\t\t\tdataCustomPrefixes[attr[len(customNameDataAttrPrefix):]] = attrval\n\t\t\tisExpected = true\n\t\t}\n\t\tif !isExpected {\n\t\t\treturn nil, newUnexpectedAttributeError(node.name, attr)\n\t\t}\n\t}\n\n\tif node.hasChildren() {\n\t\treturn nil, errNodeCannotHaveChildren\n\t}\n\tcustomName, hasCustomName := node.attributes[customNameAttrID]\n\tif !hasCustomName {\n\t\treturn nil, 
newMissingArgumentError(node.name, customNameAttrID)\n\t}\n\tcurrentFormat, err := getCurrentFormat(node, formatFromParent, formats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\targs := CustomReceiverInitArgs{\n\t\tXmlCustomAttrs: dataCustomPrefixes,\n\t}\n\n\tif cfg != nil && cfg.CustomReceiverProducers != nil {\n\t\tif prod, ok := cfg.CustomReceiverProducers[customName]; ok {\n\t\t\trec, err := prod(args)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcreceiver, err := newCustomReceiverDispatcherByValue(currentFormat, rec, customName, args)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\terr = rec.AfterParse(args)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn creceiver, nil\n\t\t}\n\t}\n\n\treturn newCustomReceiverDispatcher(currentFormat, customName, args)\n}\n\nfunc createFilter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {\n\terr := checkUnexpectedAttribute(node, outputFormatID, filterLevelsAttrID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !node.hasChildren() {\n\t\treturn nil, errNodeMustHaveChildren\n\t}\n\n\tcurrentFormat, err := getCurrentFormat(node, formatFromParent, formats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlevelsStr, isLevels := node.attributes[filterLevelsAttrID]\n\tif !isLevels {\n\t\treturn nil, newMissingArgumentError(node.name, filterLevelsAttrID)\n\t}\n\n\tlevels, err := parseLevels(levelsStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treceivers, err := createInnerReceivers(node, currentFormat, formats, cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newFilterDispatcher(currentFormat, receivers, levels...)\n}\n\nfunc createfileWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {\n\terr := checkUnexpectedAttribute(node, outputFormatID, pathID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif 
node.hasChildren() {\n\t\treturn nil, errNodeCannotHaveChildren\n\t}\n\n\tcurrentFormat, err := getCurrentFormat(node, formatFromParent, formats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpath, isPath := node.attributes[pathID]\n\tif !isPath {\n\t\treturn nil, newMissingArgumentError(node.name, pathID)\n\t}\n\n\tfileWriter, err := newFileWriter(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newFormattedWriter(fileWriter, currentFormat)\n}\n\n// Creates new SMTP writer if encountered in the config file.\nfunc createSMTPWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {\n\terr := checkUnexpectedAttribute(node, outputFormatID, senderaddressID, senderNameID, hostNameID, hostPortID, userNameID, userPassID, subjectID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Node must have children.\n\tif !node.hasChildren() {\n\t\treturn nil, errNodeMustHaveChildren\n\t}\n\tcurrentFormat, err := getCurrentFormat(node, formatFromParent, formats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsenderAddress, ok := node.attributes[senderaddressID]\n\tif !ok {\n\t\treturn nil, newMissingArgumentError(node.name, senderaddressID)\n\t}\n\tsenderName, ok := node.attributes[senderNameID]\n\tif !ok {\n\t\treturn nil, newMissingArgumentError(node.name, senderNameID)\n\t}\n\t// Process child nodes scanning for recipient email addresses and/or CA certificate paths.\n\tvar recipientAddresses []string\n\tvar caCertDirPaths []string\n\tvar mailHeaders []string\n\tfor _, childNode := range node.children {\n\t\tswitch childNode.name {\n\t\t// Extract recipient address from child nodes.\n\t\tcase recipientID:\n\t\t\taddress, ok := childNode.attributes[addressID]\n\t\t\tif !ok {\n\t\t\t\treturn nil, newMissingArgumentError(childNode.name, addressID)\n\t\t\t}\n\t\t\trecipientAddresses = append(recipientAddresses, address)\n\t\t// Extract CA certificate file path from child nodes.\n\t\tcase 
cACertDirpathID:\n\t\t\tpath, ok := childNode.attributes[pathID]\n\t\t\tif !ok {\n\t\t\t\treturn nil, newMissingArgumentError(childNode.name, pathID)\n\t\t\t}\n\t\t\tcaCertDirPaths = append(caCertDirPaths, path)\n\n\t\t// Extract email headers from child nodes.\n\t\tcase mailHeaderID:\n\t\t\theaderName, ok := childNode.attributes[mailHeaderNameID]\n\t\t\tif !ok {\n\t\t\t\treturn nil, newMissingArgumentError(childNode.name, mailHeaderNameID)\n\t\t\t}\n\n\t\t\theaderValue, ok := childNode.attributes[mailHeaderValueID]\n\t\t\tif !ok {\n\t\t\t\treturn nil, newMissingArgumentError(childNode.name, mailHeaderValueID)\n\t\t\t}\n\n\t\t\t// Build header line\n\t\t\tmailHeaders = append(mailHeaders, fmt.Sprintf(\"%s: %s\", headerName, headerValue))\n\t\tdefault:\n\t\t\treturn nil, newUnexpectedChildElementError(childNode.name)\n\t\t}\n\t}\n\thostName, ok := node.attributes[hostNameID]\n\tif !ok {\n\t\treturn nil, newMissingArgumentError(node.name, hostNameID)\n\t}\n\n\thostPort, ok := node.attributes[hostPortID]\n\tif !ok {\n\t\treturn nil, newMissingArgumentError(node.name, hostPortID)\n\t}\n\n\t// Check if the string can really be converted into int.\n\tif _, err := strconv.Atoi(hostPort); err != nil {\n\t\treturn nil, errors.New(\"invalid host port number\")\n\t}\n\n\tuserName, ok := node.attributes[userNameID]\n\tif !ok {\n\t\treturn nil, newMissingArgumentError(node.name, userNameID)\n\t}\n\n\tuserPass, ok := node.attributes[userPassID]\n\tif !ok {\n\t\treturn nil, newMissingArgumentError(node.name, userPassID)\n\t}\n\n\t// subject is optionally set by configuration.\n\t// default value is defined by DefaultSubjectPhrase constant in the writers_smtpwriter.go\n\tvar subjectPhrase = DefaultSubjectPhrase\n\n\tsubject, ok := node.attributes[subjectID]\n\tif ok {\n\t\tsubjectPhrase = subject\n\t}\n\n\tsmtpWriter := 
newSMTPWriter(\n\t\tsenderAddress,\n\t\tsenderName,\n\t\trecipientAddresses,\n\t\thostName,\n\t\thostPort,\n\t\tuserName,\n\t\tuserPass,\n\t\tcaCertDirPaths,\n\t\tsubjectPhrase,\n\t\tmailHeaders,\n\t)\n\n\treturn newFormattedWriter(smtpWriter, currentFormat)\n}\n\nfunc createConsoleWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {\n\terr := checkUnexpectedAttribute(node, outputFormatID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif node.hasChildren() {\n\t\treturn nil, errNodeCannotHaveChildren\n\t}\n\n\tcurrentFormat, err := getCurrentFormat(node, formatFromParent, formats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconsoleWriter, err := newConsoleWriter()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newFormattedWriter(consoleWriter, currentFormat)\n}\n\nfunc createconnWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {\n\tif node.hasChildren() {\n\t\treturn nil, errNodeCannotHaveChildren\n\t}\n\n\terr := checkUnexpectedAttribute(node, outputFormatID, connWriterAddrAttr, connWriterNetAttr, connWriterReconnectOnMsgAttr, connWriterUseTLSAttr, connWriterInsecureSkipVerifyAttr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcurrentFormat, err := getCurrentFormat(node, formatFromParent, formats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taddr, isAddr := node.attributes[connWriterAddrAttr]\n\tif !isAddr {\n\t\treturn nil, newMissingArgumentError(node.name, connWriterAddrAttr)\n\t}\n\n\tnet, isNet := node.attributes[connWriterNetAttr]\n\tif !isNet {\n\t\treturn nil, newMissingArgumentError(node.name, connWriterNetAttr)\n\t}\n\n\treconnectOnMsg := false\n\treconnectOnMsgStr, isReconnectOnMsgStr := node.attributes[connWriterReconnectOnMsgAttr]\n\tif isReconnectOnMsgStr {\n\t\tif reconnectOnMsgStr == \"true\" {\n\t\t\treconnectOnMsg = true\n\t\t} else if reconnectOnMsgStr == \"false\" 
{\n\t\t\treconnectOnMsg = false\n\t\t} else {\n\t\t\treturn nil, errors.New(\"node '\" + node.name + \"' has incorrect '\" + connWriterReconnectOnMsgAttr + \"' attribute value\")\n\t\t}\n\t}\n\n\tuseTLS := false\n\tuseTLSStr, isUseTLSStr := node.attributes[connWriterUseTLSAttr]\n\tif isUseTLSStr {\n\t\tif useTLSStr == \"true\" {\n\t\t\tuseTLS = true\n\t\t} else if useTLSStr == \"false\" {\n\t\t\tuseTLS = false\n\t\t} else {\n\t\t\treturn nil, errors.New(\"node '\" + node.name + \"' has incorrect '\" + connWriterUseTLSAttr + \"' attribute value\")\n\t\t}\n\t\tif useTLS {\n\t\t\tinsecureSkipVerify := false\n\t\t\tinsecureSkipVerifyStr, isInsecureSkipVerify := node.attributes[connWriterInsecureSkipVerifyAttr]\n\t\t\tif isInsecureSkipVerify {\n\t\t\t\tif insecureSkipVerifyStr == \"true\" {\n\t\t\t\t\tinsecureSkipVerify = true\n\t\t\t\t} else if insecureSkipVerifyStr == \"false\" {\n\t\t\t\t\tinsecureSkipVerify = false\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, errors.New(\"node '\" + node.name + \"' has incorrect '\" + connWriterInsecureSkipVerifyAttr + \"' attribute value\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tconfig := tls.Config{InsecureSkipVerify: insecureSkipVerify}\n\t\t\tconnWriter := newTLSWriter(net, addr, reconnectOnMsg, &config)\n\t\t\treturn newFormattedWriter(connWriter, currentFormat)\n\t\t}\n\t}\n\n\tconnWriter := newConnWriter(net, addr, reconnectOnMsg)\n\n\treturn newFormattedWriter(connWriter, currentFormat)\n}\n\nfunc createRollingFileWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {\n\tif node.hasChildren() {\n\t\treturn nil, errNodeCannotHaveChildren\n\t}\n\n\trollingTypeStr, isRollingType := node.attributes[rollingFileTypeAttr]\n\tif !isRollingType {\n\t\treturn nil, newMissingArgumentError(node.name, rollingFileTypeAttr)\n\t}\n\n\trollingType, ok := rollingTypeFromString(rollingTypeStr)\n\tif !ok {\n\t\treturn nil, errors.New(\"unknown rolling file type: \" + 
rollingTypeStr)\n\t}\n\n\tcurrentFormat, err := getCurrentFormat(node, formatFromParent, formats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpath, isPath := node.attributes[rollingFilePathAttr]\n\tif !isPath {\n\t\treturn nil, newMissingArgumentError(node.name, rollingFilePathAttr)\n\t}\n\n\trollingArchiveStr, archiveAttrExists := node.attributes[rollingFileArchiveAttr]\n\n\tvar rArchiveType rollingArchiveType\n\tvar rArchivePath string\n\tif !archiveAttrExists {\n\t\trArchiveType = rollingArchiveNone\n\t\trArchivePath = \"\"\n\t} else {\n\t\trArchiveType, ok = rollingArchiveTypeFromString(rollingArchiveStr)\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"unknown rolling archive type: \" + rollingArchiveStr)\n\t\t}\n\n\t\tif rArchiveType == rollingArchiveNone {\n\t\t\trArchivePath = \"\"\n\t\t} else {\n\t\t\trArchivePath, ok = node.attributes[rollingFileArchivePathAttr]\n\t\t\tif !ok {\n\t\t\t\trArchivePath, ok = rollingArchiveTypesDefaultNames[rArchiveType]\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\"cannot get default filename for archive type = %v\",\n\t\t\t\t\t\trArchiveType)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tnameMode := rollingNameMode(rollingNameModePostfix)\n\tnameModeStr, ok := node.attributes[rollingFileNameModeAttr]\n\tif ok {\n\t\tmode, found := rollingNameModeFromString(nameModeStr)\n\t\tif !found {\n\t\t\treturn nil, errors.New(\"unknown rolling filename mode: \" + nameModeStr)\n\t\t} else {\n\t\t\tnameMode = mode\n\t\t}\n\t}\n\n\tif rollingType == rollingTypeSize {\n\t\terr := checkUnexpectedAttribute(node, outputFormatID, rollingFileTypeAttr, rollingFilePathAttr,\n\t\t\trollingFileMaxSizeAttr, rollingFileMaxRollsAttr, rollingFileArchiveAttr,\n\t\t\trollingFileArchivePathAttr, rollingFileNameModeAttr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tmaxSizeStr, ok := node.attributes[rollingFileMaxSizeAttr]\n\t\tif !ok {\n\t\t\treturn nil, newMissingArgumentError(node.name, rollingFileMaxSizeAttr)\n\t\t}\n\n\t\tmaxSize, err 
:= strconv.ParseInt(maxSizeStr, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tmaxRolls := 0\n\t\tmaxRollsStr, ok := node.attributes[rollingFileMaxRollsAttr]\n\t\tif ok {\n\t\t\tmaxRolls, err = strconv.Atoi(maxRollsStr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\trollingWriter, err := newRollingFileWriterSize(path, rArchiveType, rArchivePath, maxSize, maxRolls, nameMode)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn newFormattedWriter(rollingWriter, currentFormat)\n\n\t} else if rollingType == rollingTypeTime {\n\t\terr := checkUnexpectedAttribute(node, outputFormatID, rollingFileTypeAttr, rollingFilePathAttr,\n\t\t\trollingFileDataPatternAttr, rollingFileArchiveAttr, rollingFileMaxRollsAttr,\n\t\t\trollingFileArchivePathAttr, rollingFileNameModeAttr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tmaxRolls := 0\n\t\tmaxRollsStr, ok := node.attributes[rollingFileMaxRollsAttr]\n\t\tif ok {\n\t\t\tmaxRolls, err = strconv.Atoi(maxRollsStr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tdataPattern, ok := node.attributes[rollingFileDataPatternAttr]\n\t\tif !ok {\n\t\t\treturn nil, newMissingArgumentError(node.name, rollingFileDataPatternAttr)\n\t\t}\n\n\t\trollingWriter, err := newRollingFileWriterTime(path, rArchiveType, rArchivePath, maxRolls, dataPattern, rollingIntervalAny, nameMode)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn newFormattedWriter(rollingWriter, currentFormat)\n\t}\n\n\treturn nil, errors.New(\"incorrect rolling writer type \" + rollingTypeStr)\n}\n\nfunc createbufferedWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {\n\terr := checkUnexpectedAttribute(node, outputFormatID, bufferedSizeAttr, bufferedFlushPeriodAttr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !node.hasChildren() {\n\t\treturn nil, errNodeMustHaveChildren\n\t}\n\n\tcurrentFormat, err 
:= getCurrentFormat(node, formatFromParent, formats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsizeStr, isSize := node.attributes[bufferedSizeAttr]\n\tif !isSize {\n\t\treturn nil, newMissingArgumentError(node.name, bufferedSizeAttr)\n\t}\n\n\tsize, err := strconv.Atoi(sizeStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tflushPeriod := 0\n\tflushPeriodStr, isFlushPeriod := node.attributes[bufferedFlushPeriodAttr]\n\tif isFlushPeriod {\n\t\tflushPeriod, err = strconv.Atoi(flushPeriodStr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Inner writer couldn't have its own format, so we pass 'currentFormat' as its parent format\n\treceivers, err := createInnerReceivers(node, currentFormat, formats, cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tformattedWriter, ok := receivers[0].(*formattedWriter)\n\tif !ok {\n\t\treturn nil, errors.New(\"buffered writer's child is not writer\")\n\t}\n\n\t// ... and then we check that it hasn't changed\n\tif formattedWriter.Format() != currentFormat {\n\t\treturn nil, errors.New(\"inner writer cannot have his own format\")\n\t}\n\n\tbufferedWriter, err := newBufferedWriter(formattedWriter.Writer(), size, time.Duration(flushPeriod))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newFormattedWriter(bufferedWriter, currentFormat)\n}\n\n// Returns an error if node has any attributes not listed in expectedAttrs.\nfunc checkUnexpectedAttribute(node *xmlNode, expectedAttrs ...string) error {\n\tfor attr := range node.attributes {\n\t\tisExpected := false\n\t\tfor _, expected := range expectedAttrs {\n\t\t\tif attr == expected {\n\t\t\t\tisExpected = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !isExpected {\n\t\t\treturn newUnexpectedAttributeError(node.name, attr)\n\t\t}\n\t}\n\n\treturn nil\n}\n\ntype expectedElementInfo struct {\n\tname      string\n\tmandatory bool\n\tmultiple  bool\n}\n\nfunc optionalElement(name string) expectedElementInfo {\n\treturn expectedElementInfo{name, false, 
false}\n}\nfunc mandatoryElement(name string) expectedElementInfo {\n\treturn expectedElementInfo{name, true, false}\n}\nfunc multipleElements(name string) expectedElementInfo {\n\treturn expectedElementInfo{name, false, true}\n}\nfunc multipleMandatoryElements(name string) expectedElementInfo {\n\treturn expectedElementInfo{name, true, true}\n}\n\nfunc checkExpectedElements(node *xmlNode, elements ...expectedElementInfo) error {\n\tfor _, element := range elements {\n\t\tcount := 0\n\t\tfor _, child := range node.children {\n\t\t\tif child.name == element.name {\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\n\t\tif count == 0 && element.mandatory {\n\t\t\treturn errors.New(node.name + \" does not have mandatory subnode - \" + element.name)\n\t\t}\n\t\tif count > 1 && !element.multiple {\n\t\t\treturn errors.New(node.name + \" has more then one subnode - \" + element.name)\n\t\t}\n\t}\n\n\tfor _, child := range node.children {\n\t\tisExpected := false\n\t\tfor _, element := range elements {\n\t\t\tif child.name == element.name {\n\t\t\t\tisExpected = true\n\t\t\t}\n\t\t}\n\n\t\tif !isExpected {\n\t\t\treturn errors.New(node.name + \" has unexpected child: \" + child.name)\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/cfg_parser_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"fmt\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n)\n\ntype customTestReceiverOutput struct {\n\tinitCalled    bool\n\tdataPassed    string\n\tmessageOutput string\n\tlevelOutput   LogLevel\n\tclosed        bool\n\tflushed       bool\n}\ntype customTestReceiver struct{ co *customTestReceiverOutput }\n\nfunc (cr *customTestReceiver) ReceiveMessage(message string, level LogLevel, context LogContextInterface) error {\n\tcr.co.messageOutput = message\n\tcr.co.levelOutput = level\n\treturn nil\n}\n\nfunc (cr 
*customTestReceiver) String() string {\n\treturn fmt.Sprintf(\"custom data='%s'\", cr.co.dataPassed)\n}\n\nfunc (cr *customTestReceiver) AfterParse(initArgs CustomReceiverInitArgs) error {\n\tcr.co = new(customTestReceiverOutput)\n\tcr.co.initCalled = true\n\tcr.co.dataPassed = initArgs.XmlCustomAttrs[\"test\"]\n\treturn nil\n}\n\nfunc (cr *customTestReceiver) Flush() {\n\tcr.co.flushed = true\n}\n\nfunc (cr *customTestReceiver) Close() error {\n\tcr.co.closed = true\n\treturn nil\n}\n\nvar re = regexp.MustCompile(`[^a-zA-Z0-9]+`)\n\nfunc getTestFileName(testName, postfix string) string {\n\tif len(postfix) != 0 {\n\t\treturn strings.ToLower(re.ReplaceAllString(testName, \"_\")) + \"_\" + postfix + \"_test.log\"\n\t}\n\treturn strings.ToLower(re.ReplaceAllString(testName, \"_\")) + \"_test.log\"\n}\n\nvar parserTests []parserTest\n\ntype parserTest struct {\n\ttestName      string\n\tconfig        string\n\texpected      *logConfig //interface{}\n\terrorExpected bool\n\tparserConfig  *CfgParseParams\n}\n\nfunc getParserTests() []parserTest {\n\tif parserTests == nil {\n\t\tparserTests = make([]parserTest, 0)\n\n\t\ttestName := \"Simple file output\"\n\t\ttestLogFileName := getTestFileName(testName, \"\")\n\t\ttestConfig := `\n\t\t<seelog>\n\t\t\t<outputs>\n\t\t\t\t<file path=\"` + testLogFileName + `\"/>\n\t\t\t</outputs>\n\t\t</seelog>\n\t\t`\n\t\ttestExpected := new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestfileWriter, _ := newFileWriter(testLogFileName)\n\t\ttestHeadSplitter, _ := newSplitDispatcher(defaultformatter, []interface{}{testfileWriter})\n\t\ttestExpected.LogType = asyncLooploggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Filter dispatcher\"\n\t\ttestLogFileName = getTestFileName(testName, \"\")\n\t\ttestConfig = 
`\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<filter levels=\"debug, info, critical\">\n\t\t\t\t\t<file path=\"` + testLogFileName + `\"/>\n\t\t\t\t</filter>\n\t\t\t</outputs>\n\t\t</seelog>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestfileWriter, _ = newFileWriter(testLogFileName)\n\t\ttestFilter, _ := newFilterDispatcher(defaultformatter, []interface{}{testfileWriter}, DebugLvl, InfoLvl, CriticalLvl)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testFilter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Console writer\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<console />\n\t\t\t</outputs>\n\t\t</seelog>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestconsoleWriter, _ := newConsoleWriter()\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"SMTP writer\"\n\t\ttestConfig = `\n<seelog>\n\t<outputs>\n\t\t<smtp senderaddress=\"sa\" sendername=\"sn\"  hostname=\"hn\" hostport=\"123\" username=\"un\" password=\"up\">\n\t\t\t<recipient address=\"ra1\"/>\n\t\t\t<recipient address=\"ra2\"/>\n\t\t\t<recipient address=\"ra3\"/>\n\t\t\t<cacertdirpath path=\"cacdp1\"/>\n\t\t\t<cacertdirpath path=\"cacdp2\"/>\n\t\t</smtp>\n\t</outputs>\n</seelog>\n\t\t`\n\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = 
newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestSMTPWriter := newSMTPWriter(\n\t\t\t\"sa\",\n\t\t\t\"sn\",\n\t\t\t[]string{\"ra1\", \"ra2\", \"ra3\"},\n\t\t\t\"hn\",\n\t\t\t\"123\",\n\t\t\t\"un\",\n\t\t\t\"up\",\n\t\t\t[]string{\"cacdp1\", \"cacdp2\"},\n\t\t\tDefaultSubjectPhrase,\n\t\t\tnil,\n\t\t)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testSMTPWriter})\n\t\ttestExpected.LogType = asyncLooploggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"SMTP writer custom header and subject configuration\"\n\t\ttestConfig = `\n<seelog>\n\t<outputs>\n\t\t<smtp senderaddress=\"sa\" sendername=\"sn\"  hostname=\"hn\" hostport=\"123\" username=\"un\" password=\"up\" subject=\"ohlala\">\n\t\t\t<recipient address=\"ra1\"/>\n\t\t\t<cacertdirpath path=\"cacdp1\"/>\n\t\t\t<header name=\"Priority\" value=\"Urgent\" />\n\t\t\t<header name=\"Importance\" value=\"high\" />\n\t\t\t<header name=\"Sensitivity\" value=\"Company-Confidential\" />\n\t\t\t<header name=\"Auto-Submitted\" value=\"auto-generated\" />\n\t\t</smtp>\n\t</outputs>\n</seelog>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestSMTPWriter = newSMTPWriter(\n\t\t\t\"sa\",\n\t\t\t\"sn\",\n\t\t\t[]string{\"ra1\"},\n\t\t\t\"hn\",\n\t\t\t\"123\",\n\t\t\t\"un\",\n\t\t\t\"up\",\n\t\t\t[]string{\"cacdp1\"},\n\t\t\t\"ohlala\",\n\t\t\t[]string{\"Priority: Urgent\", \"Importance: high\", \"Sensitivity: Company-Confidential\", \"Auto-Submitted: auto-generated\"},\n\t\t)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testSMTPWriter})\n\t\ttestExpected.LogType = asyncLooploggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, 
parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Default output\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\"/>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestconsoleWriter, _ = newConsoleWriter()\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Asyncloop behavior\"\n\t\ttestConfig = `\n\t\t<seelog type=\"asyncloop\"/>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestconsoleWriter, _ = newConsoleWriter()\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter})\n\t\ttestExpected.LogType = asyncLooploggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Asynctimer behavior\"\n\t\ttestConfig = `\n\t\t<seelog type=\"asynctimer\" asyncinterval=\"101\"/>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestconsoleWriter, _ = newConsoleWriter()\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter})\n\t\ttestExpected.LogType = asyncTimerloggerTypeFromString\n\t\ttestExpected.LoggerData = asyncTimerLoggerData{101}\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Rolling file writer 
size\"\n\t\ttestLogFileName = getTestFileName(testName, \"\")\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<rollingfile type=\"size\" filename=\"` + testLogFileName + `\" maxsize=\"100\" maxrolls=\"5\" />\n\t\t\t</outputs>\n\t\t</seelog>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestrollingFileWriter, _ := newRollingFileWriterSize(testLogFileName, rollingArchiveNone, \"\", 100, 5, rollingNameModePostfix)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testrollingFileWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Rolling file writer archive zip\"\n\t\ttestLogFileName = getTestFileName(testName, \"\")\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<rollingfile type=\"size\" filename=\"` + testLogFileName + `\" maxsize=\"100\" maxrolls=\"5\" archivetype=\"zip\"/>\n\t\t\t</outputs>\n\t\t</seelog>`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestrollingFileWriter, _ = newRollingFileWriterSize(testLogFileName, rollingArchiveZip, \"log.zip\", 100, 5, rollingNameModePostfix)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testrollingFileWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Rolling file writer archive zip with specified path\"\n\t\ttestLogFileName = getTestFileName(testName, \"\")\n\t\ttestConfig = `\n\t\t<seelog 
type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<rollingfile namemode=\"prefix\" type=\"size\" filename=\"` + testLogFileName + `\" maxsize=\"100\" maxrolls=\"5\" archivetype=\"zip\" archivepath=\"test.zip\"/>\n\t\t\t</outputs>\n\t\t</seelog>`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestrollingFileWriter, _ = newRollingFileWriterSize(testLogFileName, rollingArchiveZip, \"test.zip\", 100, 5, rollingNameModePrefix)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testrollingFileWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Rolling file writer archive none\"\n\t\ttestLogFileName = getTestFileName(testName, \"\")\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<rollingfile namemode=\"postfix\" type=\"size\" filename=\"` + testLogFileName + `\" maxsize=\"100\" maxrolls=\"5\" archivetype=\"none\"/>\n\t\t\t</outputs>\n\t\t</seelog>`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestrollingFileWriter, _ = newRollingFileWriterSize(testLogFileName, rollingArchiveNone, \"\", 100, 5, rollingNameModePostfix)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testrollingFileWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Rolling file writer date\"\n\t\ttestLogFileName = getTestFileName(testName, \"\")\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<rollingfile type=\"date\" filename=\"` + 
testLogFileName + `\" datepattern=\"2006-01-02T15:04:05Z07:00\" />\n\t\t\t</outputs>\n\t\t</seelog>`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestrollingFileWriterTime, _ := newRollingFileWriterTime(testLogFileName, rollingArchiveNone, \"\", 0, \"2006-01-02T15:04:05Z07:00\", rollingIntervalAny, rollingNameModePostfix)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testrollingFileWriterTime})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Buffered writer\"\n\t\ttestLogFileName = getTestFileName(testName, \"\")\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<buffered size=\"100500\" flushperiod=\"100\">\n\t\t\t\t\t<rollingfile type=\"date\" filename=\"` + testLogFileName + `\" datepattern=\"2006-01-02T15:04:05Z07:00\" />\n\t\t\t\t</buffered>\n\t\t\t</outputs>\n\t\t</seelog>`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestrollingFileWriterTime, _ = newRollingFileWriterTime(testLogFileName, rollingArchiveNone, \"\", 0, \"2006-01-02T15:04:05Z07:00\", rollingIntervalDaily, rollingNameModePostfix)\n\t\ttestbufferedWriter, _ := newBufferedWriter(testrollingFileWriterTime, 100500, 100)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testbufferedWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Inner splitter output\"\n\t\ttestLogFileName1 := getTestFileName(testName, \"1\")\n\t\ttestLogFileName2 := 
getTestFileName(testName, \"2\")\n\t\ttestLogFileName3 := getTestFileName(testName, \"3\")\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<file path=\"` + testLogFileName1 + `\"/>\n\t\t\t\t<splitter>\n\t\t\t\t\t<file path=\"` + testLogFileName2 + `\"/>\n\t\t\t\t\t<file path=\"` + testLogFileName3 + `\"/>\n\t\t\t\t</splitter>\n\t\t\t</outputs>\n\t\t</seelog>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestfileWriter1, _ := newFileWriter(testLogFileName2)\n\t\ttestfileWriter2, _ := newFileWriter(testLogFileName3)\n\t\ttestInnerSplitter, _ := newSplitDispatcher(defaultformatter, []interface{}{testfileWriter1, testfileWriter2})\n\t\ttestfileWriter, _ = newFileWriter(testLogFileName1)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testfileWriter, testInnerSplitter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\tRegisterReceiver(\"custom-name-1\", &customTestReceiver{})\n\n\t\ttestName = \"Custom receiver 1\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<custom name=\"custom-name-1\" data-test=\"set\"/>\n\t\t\t</outputs>\n\t\t</seelog>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestCustomReceiver, _ := newCustomReceiverDispatcher(defaultformatter, \"custom-name-1\", CustomReceiverInitArgs{\n\t\t\tXmlCustomAttrs: map[string]string{\n\t\t\t\t\"test\": \"set\",\n\t\t\t},\n\t\t})\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testCustomReceiver})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests 
= append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Custom receiver 2\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<custom name=\"custom-name-2\" data-test=\"set2\"/>\n\t\t\t</outputs>\n\t\t</seelog>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\tcrec := &customTestReceiver{}\n\t\tcargs := CustomReceiverInitArgs{\n\t\t\tXmlCustomAttrs: map[string]string{\n\t\t\t\t\"test\": \"set2\",\n\t\t\t},\n\t\t}\n\t\tcrec.AfterParse(cargs)\n\t\ttestCustomReceiver2, _ := newCustomReceiverDispatcherByValue(defaultformatter, crec, \"custom-name-2\", cargs)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testCustomReceiver2})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tfnc := func(initArgs CustomReceiverInitArgs) (CustomReceiver, error) {\n\t\t\treturn &customTestReceiver{}, nil\n\t\t}\n\t\tcfg := CfgParseParams{\n\t\t\tCustomReceiverProducers: map[string]CustomReceiverProducer{\n\t\t\t\t\"custom-name-2\": CustomReceiverProducer(fnc),\n\t\t\t},\n\t\t}\n\t\ttestExpected.Params = &cfg\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, &cfg})\n\n\t\tRegisterReceiver(\"-\", &customTestReceiver{})\n\t\ttestName = \"Custom receiver 3\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<custom name=\"-\" data-test=\"set3\"/>\n\t\t\t</outputs>\n\t\t</seelog>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\tcreccustom := &customTestReceiver{}\n\t\tcargs3 := CustomReceiverInitArgs{\n\t\t\tXmlCustomAttrs: map[string]string{\n\t\t\t\t\"test\": \"set3\",\n\t\t\t},\n\t\t}\n\t\tcreccustom.AfterParse(cargs3)\n\t\ttestCustomReceiver, _ = 
newCustomReceiverDispatcherByValue(defaultformatter, creccustom, \"-\", cargs3)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testCustomReceiver})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Custom receivers with formats\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<custom name=\"custom-name-1\" data-test=\"set1\"/>\n\t\t\t\t<custom name=\"custom-name-1\" data-test=\"set2\"/>\n\t\t\t\t<custom name=\"custom-name-1\" data-test=\"set3\"/>\n\t\t\t</outputs>\n\t\t</seelog>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestCustomReceivers := make([]*customReceiverDispatcher, 3)\n\t\tfor i := 0; i < 3; i++ {\n\t\t\ttestCustomReceivers[i], _ = newCustomReceiverDispatcher(defaultformatter, \"custom-name-1\", CustomReceiverInitArgs{\n\t\t\t\tXmlCustomAttrs: map[string]string{\n\t\t\t\t\t\"test\": fmt.Sprintf(\"set%d\", i+1),\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testCustomReceivers[0], testCustomReceivers[1], testCustomReceivers[2]})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Format\"\n\t\ttestLogFileName = getTestFileName(testName, \"\")\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs formatid=\"dateFormat\">\n\t\t\t\t<file path=\"` + testLogFileName + `\"/>\n\t\t\t</outputs>\n\t\t\t<formats>\n\t\t\t\t<format id=\"dateFormat\" format=\"%Level %Msg %File\" />\n\t\t\t</formats>\n\t\t</seelog>\n\t\t`\n\t\ttestExpected = 
new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestfileWriter, _ = newFileWriter(testLogFileName)\n\t\ttestFormat, _ := newFormatter(\"%Level %Msg %File\")\n\t\ttestHeadSplitter, _ = newSplitDispatcher(testFormat, []interface{}{testfileWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Format2\"\n\t\ttestLogFileName = getTestFileName(testName, \"\")\n\t\ttestLogFileName1 = getTestFileName(testName, \"1\")\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs formatid=\"format1\">\n\t\t\t\t<file path=\"` + testLogFileName + `\"/>\n\t\t\t\t<file formatid=\"format2\" path=\"` + testLogFileName1 + `\"/>\n\t\t\t</outputs>\n\t\t\t<formats>\n\t\t\t\t<format id=\"format1\" format=\"%Level %Msg %File\" />\n\t\t\t\t<format id=\"format2\" format=\"%l %Msg\" />\n\t\t\t</formats>\n\t\t</seelog>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestfileWriter, _ = newFileWriter(testLogFileName)\n\t\ttestfileWriter1, _ = newFileWriter(testLogFileName1)\n\t\ttestFormat1, _ := newFormatter(\"%Level %Msg %File\")\n\t\ttestFormat2, _ := newFormatter(\"%l %Msg\")\n\t\tformattedWriter, _ := newFormattedWriter(testfileWriter1, testFormat2)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(testFormat1, []interface{}{testfileWriter, formattedWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Minlevel = warn\"\n\t\ttestConfig = `<seelog minlevel=\"warn\"/>`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = 
newMinMaxConstraints(WarnLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestconsoleWriter, _ = newConsoleWriter()\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter})\n\t\ttestExpected.LogType = asyncLooploggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Maxlevel = trace\"\n\t\ttestConfig = `<seelog maxlevel=\"trace\"/>`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, TraceLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestconsoleWriter, _ = newConsoleWriter()\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter})\n\t\ttestExpected.LogType = asyncLooploggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Level between info and error\"\n\t\ttestConfig = `<seelog minlevel=\"info\" maxlevel=\"error\"/>`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(InfoLvl, ErrorLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestconsoleWriter, _ = newConsoleWriter()\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter})\n\t\ttestExpected.LogType = asyncLooploggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Off with minlevel\"\n\t\ttestConfig = `<seelog minlevel=\"off\"/>`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newOffConstraints()\n\t\ttestExpected.Exceptions = nil\n\t\ttestconsoleWriter, _ = newConsoleWriter()\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, 
[]interface{}{testconsoleWriter})\n\t\ttestExpected.LogType = asyncLooploggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Off with levels\"\n\t\ttestConfig = `<seelog levels=\"off\"/>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Levels list\"\n\t\ttestConfig = `<seelog levels=\"debug, info, critical\"/>`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newListConstraints([]LogLevel{\n\t\t\tDebugLvl, InfoLvl, CriticalLvl})\n\t\ttestExpected.Exceptions = nil\n\t\ttestconsoleWriter, _ = newConsoleWriter()\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter})\n\t\ttestExpected.LogType = asyncLooploggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Errors #1\"\n\t\ttestConfig = `<seelog minlevel=\"debug\" minlevel=\"trace\"/>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #2\"\n\t\ttestConfig = `<seelog minlevel=\"error\" maxlevel=\"debug\"/>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #3\"\n\t\ttestConfig = `<seelog maxlevel=\"debug\" maxlevel=\"trace\"/>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #4\"\n\t\ttestConfig = `<seelog maxlevel=\"off\"/>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #5\"\n\t\ttestConfig = `<seelog minlevel=\"off\" maxlevel=\"trace\"/>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = 
\"Errors #6\"\n\t\ttestConfig = `<seelog minlevel=\"warn\" maxlevel=\"error\" levels=\"debug\"/>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #7\"\n\t\ttestConfig = `<not_seelog/>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #8\"\n\t\ttestConfig = `<seelog levels=\"warn, debug, test\"/>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #9\"\n\t\ttestConfig = `<seelog levels=\"\"/>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #10\"\n\t\ttestConfig = `<seelog levels=\"off\" something=\"abc\"/>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #11\"\n\t\ttestConfig = `<seelog><output/></seelog>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #12\"\n\t\ttestConfig = `<seelog><outputs/><outputs/></seelog>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #13\"\n\t\ttestConfig = `<seelog><exceptions/></seelog>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #14\"\n\t\ttestConfig = `<seelog><formats/></seelog>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #15\"\n\t\ttestConfig = `<seelog><outputs><splitter/></outputs></seelog>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #16\"\n\t\ttestConfig = `<seelog><outputs><filter/></outputs></seelog>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #17\"\n\t\ttestLogFileName = 
getTestFileName(testName, \"\")\n\t\ttestConfig = `<seelog><outputs><file path=\"` + testLogFileName + `\"><something/></file></outputs></seelog>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #18\"\n\t\ttestConfig = `<seelog><outputs><buffered size=\"100500\" flushperiod=\"100\"/></outputs></seelog>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #19\"\n\t\ttestConfig = `<seelog><outputs></outputs></seelog>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Exceptions: restricting\"\n\t\ttestConfig =\n\t\t\t`\n\t\t<seelog type=\"sync\">\n\t\t\t<exceptions>\n\t\t\t\t<exception funcpattern=\"Test*\" filepattern=\"someFile.go\" minlevel=\"off\"/>\n\t\t\t</exceptions>\n\t\t</seelog>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\tlistConstraint, _ := newOffConstraints()\n\t\texception, _ := newLogLevelException(\"Test*\", \"someFile.go\", listConstraint)\n\t\ttestExpected.Exceptions = []*logLevelException{exception}\n\t\ttestconsoleWriter, _ = newConsoleWriter()\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Exceptions: allowing #1\"\n\t\ttestConfig =\n\t\t\t`\n\t\t<seelog type=\"sync\" levels=\"error\">\n\t\t\t<exceptions>\n\t\t\t\t<exception filepattern=\"testfile.go\" minlevel=\"trace\"/>\n\t\t\t</exceptions>\n\t\t</seelog>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newListConstraints([]LogLevel{ErrorLvl})\n\t\tminMaxConstraint, _ := newMinMaxConstraints(TraceLvl, 
CriticalLvl)\n\t\texception, _ = newLogLevelException(\"*\", \"testfile.go\", minMaxConstraint)\n\t\ttestExpected.Exceptions = []*logLevelException{exception}\n\t\ttestconsoleWriter, _ = newConsoleWriter()\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Exceptions: allowing #2\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\" levels=\"off\">\n\t\t\t<exceptions>\n\t\t\t\t<exception filepattern=\"testfile.go\" minlevel=\"warn\"/>\n\t\t\t</exceptions>\n\t\t</seelog>\n\t\t`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newOffConstraints()\n\t\tminMaxConstraint, _ = newMinMaxConstraints(WarnLvl, CriticalLvl)\n\t\texception, _ = newLogLevelException(\"*\", \"testfile.go\", minMaxConstraint)\n\t\ttestExpected.Exceptions = []*logLevelException{exception}\n\t\ttestconsoleWriter, _ = newConsoleWriter()\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Predefined formats\"\n\t\tformatID := predefinedPrefix + \"xml-debug-short\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs formatid=\"` + formatID + `\">\n\t\t\t\t<console />\n\t\t\t</outputs>\n\t\t</seelog>`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestconsoleWriter, _ = newConsoleWriter()\n\t\ttestFormat, _ = predefinedFormats[formatID]\n\t\ttestHeadSplitter, _ = newSplitDispatcher(testFormat, 
[]interface{}{testconsoleWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Predefined formats redefine\"\n\t\ttestLogFileName = getTestFileName(testName, \"\")\n\t\tformatID = predefinedPrefix + \"xml-debug-short\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs formatid=\"` + formatID + `\">\n\t\t\t\t<file path=\"` + testLogFileName + `\"/>\n\t\t\t</outputs>\n\t\t\t<formats>\n\t\t\t\t<format id=\"` + formatID + `\" format=\"%Level %Msg %File\" />\n\t\t\t</formats>\n\t\t</seelog>`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestfileWriter, _ = newFileWriter(testLogFileName)\n\t\ttestFormat, _ = newFormatter(\"%Level %Msg %File\")\n\t\ttestHeadSplitter, _ = newSplitDispatcher(testFormat, []interface{}{testfileWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Conn writer 1\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<conn net=\"tcp\" addr=\":8888\" />\n\t\t\t</outputs>\n\t\t</seelog>`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestConnWriter := newConnWriter(\"tcp\", \":8888\", false)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testConnWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Conn writer 2\"\n\t\ttestConfig = 
`\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<conn net=\"tcp\" addr=\":8888\" reconnectonmsg=\"true\" />\n\t\t\t</outputs>\n\t\t</seelog>`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestConnWriter = newConnWriter(\"tcp\", \":8888\", true)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testConnWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t\ttestName = \"Errors #11\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\"><exceptions>\n\t\t\t\t<exception filepattern=\"testfile.go\" minlevel=\"trace\"/>\n\t\t\t\t<exception filepattern=\"testfile.go\" minlevel=\"warn\"/>\n\t\t</exceptions></seelog>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #12\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\"><exceptions>\n\t\t\t\t<exception filepattern=\"!@+$)!!%&@(^$\" minlevel=\"trace\"/>\n\t\t</exceptions></seelog>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #13\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\"><exceptions>\n\t\t\t\t<exception filepattern=\"*\" minlevel=\"unknown\"/>\n\t\t</exceptions></seelog>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #14\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\" levels=”off”>\n\t\t\t<exceptions>\n\t\t\t\t<exception filepattern=\"testfile.go\" minlevel=\"off\"/>\n\t\t\t</exceptions>\n\t\t</seelog>\n\t\t`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #15\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\" 
levels=”trace”>\n\t\t\t<exceptions>\n\t\t\t\t<exception filepattern=\"testfile.go\" levels=\"trace\"/>\n\t\t\t</exceptions>\n\t\t</seelog>\n\t\t`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #16\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\" minlevel=”trace”>\n\t\t\t<exceptions>\n\t\t\t\t<exception filepattern=\"testfile.go\" minlevel=\"trace\"/>\n\t\t\t</exceptions>\n\t\t</seelog>\n\t\t`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #17\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\" minlevel=”trace”>\n\t\t\t<exceptions>\n\t\t\t\t<exception filepattern=\"testfile.go\" minlevel=\"warn\"/>\n\t\t\t</exceptions>\n\t\t\t<exceptions>\n\t\t\t\t<exception filepattern=\"testfile.go\" minlevel=\"warn\"/>\n\t\t\t</exceptions>\n\t\t</seelog>\n\t\t`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #18\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\" minlevel=”trace”>\n\t\t\t<exceptions>\n\t\t\t\t<exception filepattern=\"testfile.go\"/>\n\t\t\t</exceptions>\n\t\t</seelog>\n\t\t`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #19\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\" minlevel=”trace”>\n\t\t\t<exceptions>\n\t\t\t\t<exception minlevel=\"warn\"/>\n\t\t\t</exceptions>\n\t\t</seelog>\n\t\t`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #20\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\" minlevel=”trace”>\n\t\t\t<exceptions>\n\t\t\t\t<exception/>\n\t\t\t</exceptions>\n\t\t</seelog>\n\t\t`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #21\"\n\t\ttestConfig = 
`\n\t\t<seelog>\n\t\t\t<outputs>\n\t\t\t\t<splitter>\n\t\t\t\t</splitter>\n\t\t\t</outputs>\n\t\t</seelog>\n\t\t`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #22\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<filter levels=\"debug, info, critical\">\n\n\t\t\t\t</filter>\n\t\t\t</outputs>\n\t\t</seelog>\n\t\t`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #23\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<buffered size=\"100500\" flushperiod=\"100\">\n\n\t\t\t\t</buffered>\n\t\t\t</outputs>\n\t\t</seelog>\n\t\t`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #24\"\n\t\ttestLogFileName = getTestFileName(testName, \"\")\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<buffered size=\"100500\" flushperiod=\"100\">\n\t\t\t\t\t<rollingfile type=\"date\" filename=\"` + testLogFileName + `\" datepattern=\"2006-01-02T15:04:05Z07:00\" formatid=\"testFormat\"/>\n\t\t\t\t</buffered>\n\t\t\t</outputs>\n\t\t\t<formats>\n\t\t\t\t<format id=\"testFormat\" format=\"%Level %Msg %File 123\" />\n\t\t\t</formats>\n\t\t</seelog>\n\t\t`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #25\"\n\t\ttestLogFileName = getTestFileName(testName, \"\")\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<outputs>\n\t\t\t\t\t<file path=\"` + testLogFileName + `\"/>\n\t\t\t\t</outputs>\n\t\t\t\t<outputs>\n\t\t\t\t\t<file path=\"` + testLogFileName + `\"/>\n\t\t\t\t</outputs>\n\t\t\t</outputs>\n\t\t</seelog>\n\t\t`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Errors #26\"\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<conn 
net=\"tcp\" addr=\":8888\" reconnectonmsg=\"true1\" />\n\t\t\t</outputs>\n\t\t</seelog>`\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil})\n\n\t\ttestName = \"Buffered writer same formatid override\"\n\t\ttestLogFileName = getTestFileName(testName, \"\")\n\t\ttestConfig = `\n\t\t<seelog type=\"sync\">\n\t\t\t<outputs>\n\t\t\t\t<buffered size=\"100500\" flushperiod=\"100\" formatid=\"testFormat\">\n\t\t\t\t\t<rollingfile namemode=\"prefix\" type=\"date\" filename=\"` + testLogFileName + `\" datepattern=\"2006-01-02T15:04:05Z07:00\" formatid=\"testFormat\"/>\n\t\t\t\t</buffered>\n\t\t\t</outputs>\n\t\t\t<formats>\n\t\t\t\t<format id=\"testFormat\" format=\"%Level %Msg %File 123\" />\n\t\t\t</formats>\n\t\t</seelog>`\n\t\ttestExpected = new(logConfig)\n\t\ttestExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl)\n\t\ttestExpected.Exceptions = nil\n\t\ttestrollingFileWriterTime, _ = newRollingFileWriterTime(testLogFileName, rollingArchiveNone, \"\", 0, \"2006-01-02T15:04:05Z07:00\", rollingIntervalDaily, rollingNameModePrefix)\n\t\ttestbufferedWriter, _ = newBufferedWriter(testrollingFileWriterTime, 100500, 100)\n\t\ttestFormat, _ = newFormatter(\"%Level %Msg %File 123\")\n\t\tformattedWriter, _ = newFormattedWriter(testbufferedWriter, testFormat)\n\t\ttestHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{formattedWriter})\n\t\ttestExpected.LogType = syncloggerTypeFromString\n\t\ttestExpected.RootDispatcher = testHeadSplitter\n\t\tparserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})\n\n\t}\n\n\treturn parserTests\n}\n\n// Temporary solution: compare by string identity. 
Not the best solution in\n// terms of performance, but a valid one in terms of comparison, because\n// every seelog dispatcher/receiver must have a valid String() func\n// that fully represents its internal parameters.\nfunc configsAreEqual(conf1 *logConfig, conf2 interface{}) bool {\n\tif conf1 == nil {\n\t\treturn conf2 == nil\n\t}\n\tif conf2 == nil {\n\t\treturn conf1 == nil\n\t}\n\n\t// logConfig, ok := conf2 //.(*logConfig)\n\t// if !ok {\n\t// \treturn false\n\t// }\n\n\treturn fmt.Sprintf(\"%v\", conf1) == fmt.Sprintf(\"%v\", conf2) //logConfig)\n}\n\nfunc testLogFileFilter(fn string) bool {\n\treturn \".log\" == filepath.Ext(fn)\n}\n\nfunc cleanupAfterCfgTest(t *testing.T) {\n\ttoDel, err := getDirFilePaths(\".\", testLogFileFilter, true)\n\tif nil != err {\n\t\tt.Fatal(\"Cannot list files in test directory!\")\n\t}\n\n\tfor _, p := range toDel {\n\t\terr = tryRemoveFile(p)\n\t\tif nil != err {\n\t\t\tt.Errorf(\"cannot remove file %s in test directory: %s\", p, err.Error())\n\t\t}\n\t}\n}\n\nfunc parseTest(test parserTest, t *testing.T) {\n\tconf, err := configFromReaderWithConfig(strings.NewReader(test.config), test.parserConfig)\n\tif /*err != nil &&*/ conf != nil && conf.RootDispatcher != nil {\n\t\tdefer func() {\n\t\t\tif err = conf.RootDispatcher.Close(); err != nil {\n\t\t\t\tt.Errorf(\"\\n----ERROR while closing root dispatcher in %s test: %s\", test.testName, err)\n\t\t\t}\n\t\t}()\n\t}\n\n\tif (err != nil) != test.errorExpected {\n\t\tt.Errorf(\"\\n----ERROR in %s:\\nConfig: %s\\n* Expected error:%t. Got error: %t\\n\",\n\t\t\ttest.testName, test.config, test.errorExpected, (err != nil))\n\t\tif err != nil {\n\t\t\tt.Logf(\"%s\\n\", err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tif err == nil && !configsAreEqual(conf, test.expected) {\n\t\tt.Errorf(\"\\n----ERROR in %s:\\nConfig: %s\\n* Expected: %v. 
\\n* Got: %v\\n\",\n\t\t\ttest.testName, test.config, test.expected, conf)\n\t}\n}\n\nfunc TestParser(t *testing.T) {\n\tdefer cleanupAfterCfgTest(t)\n\n\tfor _, test := range getParserTests() {\n\t\tparseTest(test, t)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/common_closer.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/common_constraints.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n)\n\n// Represents constraints which form a general rule for log levels selection\ntype logLevelConstraints interface {\n\tIsAllowed(level LogLevel) bool\n}\n\n// A minMaxConstraints represents constraints which use minimal and maximal allowed log levels.\ntype minMaxConstraints struct {\n\tmin LogLevel\n\tmax LogLevel\n}\n\n// newMinMaxConstraints creates a new minMaxConstraints struct with the specified min and max levels.\nfunc newMinMaxConstraints(min LogLevel, max LogLevel) (*minMaxConstraints, 
error) {\n\tif min > max {\n\t\treturn nil, fmt.Errorf(\"min level can't be greater than max. Got min: %d, max: %d\", min, max)\n\t}\n\tif min < TraceLvl || min > CriticalLvl {\n\t\treturn nil, fmt.Errorf(\"min level can't be less than Trace or greater than Critical. Got min: %d\", min)\n\t}\n\tif max < TraceLvl || max > CriticalLvl {\n\t\treturn nil, fmt.Errorf(\"max level can't be less than Trace or greater than Critical. Got max: %d\", max)\n\t}\n\n\treturn &minMaxConstraints{min, max}, nil\n}\n\n// IsAllowed returns true, if log level is in [min, max] range (inclusive).\nfunc (minMaxConstr *minMaxConstraints) IsAllowed(level LogLevel) bool {\n\treturn level >= minMaxConstr.min && level <= minMaxConstr.max\n}\n\nfunc (minMaxConstr *minMaxConstraints) String() string {\n\treturn fmt.Sprintf(\"Min: %s. Max: %s\", minMaxConstr.min, minMaxConstr.max)\n}\n\n//=======================================================\n\n// A listConstraints represents constraints which use allowed log levels list.\ntype listConstraints struct {\n\tallowedLevels map[LogLevel]bool\n}\n\n// newListConstraints creates a new listConstraints struct with the specified allowed levels.\nfunc newListConstraints(allowList []LogLevel) (*listConstraints, error) {\n\tif allowList == nil {\n\t\treturn nil, errors.New(\"list can't be nil\")\n\t}\n\n\tallowLevels, err := createMapFromList(allowList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = validateOffLevel(allowLevels)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &listConstraints{allowLevels}, nil\n}\n\nfunc (listConstr *listConstraints) String() string {\n\tallowedList := \"List: \"\n\n\tlistLevel := make([]string, len(listConstr.allowedLevels))\n\n\tvar logLevel LogLevel\n\ti := 0\n\tfor logLevel = TraceLvl; logLevel <= Off; logLevel++ {\n\t\tif listConstr.allowedLevels[logLevel] {\n\t\t\tlistLevel[i] = logLevel.String()\n\t\t\ti++\n\t\t}\n\t}\n\n\tallowedList += strings.Join(listLevel, \",\")\n\n\treturn 
allowedList\n}\n\nfunc createMapFromList(allowedList []LogLevel) (map[LogLevel]bool, error) {\n\tallowedLevels := make(map[LogLevel]bool, 0)\n\tfor _, level := range allowedList {\n\t\tif level < TraceLvl || level > Off {\n\t\t\treturn nil, fmt.Errorf(\"level can't be less than Trace or greater than Critical. Got level: %d\", level)\n\t\t}\n\t\tallowedLevels[level] = true\n\t}\n\treturn allowedLevels, nil\n}\nfunc validateOffLevel(allowedLevels map[LogLevel]bool) error {\n\tif _, ok := allowedLevels[Off]; ok && len(allowedLevels) > 1 {\n\t\treturn errors.New(\"logLevel Off cant be mixed with other levels\")\n\t}\n\n\treturn nil\n}\n\n// IsAllowed returns true, if log level is in allowed log levels list.\n// If the list contains the only item 'common.Off' then IsAllowed will always return false for any input values.\nfunc (listConstr *listConstraints) IsAllowed(level LogLevel) bool {\n\tfor l := range listConstr.allowedLevels {\n\t\tif l == level && level != Off {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n// AllowedLevels returns allowed levels configuration as a map.\nfunc (listConstr *listConstraints) AllowedLevels() map[LogLevel]bool {\n\treturn listConstr.allowedLevels\n}\n\n//=======================================================\n\ntype offConstraints struct {\n}\n\nfunc newOffConstraints() (*offConstraints, error) {\n\treturn &offConstraints{}, nil\n}\n\nfunc (offConstr *offConstraints) IsAllowed(level LogLevel) bool {\n\treturn false\n}\n\nfunc (offConstr *offConstraints) String() string {\n\treturn \"Off constraint\"\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/common_constraints_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"testing\"\n)\n\nfunc TestInvalidminMaxConstraints(t *testing.T) {\n\tconstr, err := newMinMaxConstraints(CriticalLvl, WarnLvl)\n\n\tif err == nil || constr != nil {\n\t\tt.Errorf(\"expected an error and a nil value for minmax constraints: min = %d, max = %d. 
Got: %v, %v\",\n\t\t\tCriticalLvl, WarnLvl, err, constr)\n\t\treturn\n\t}\n}\n\nfunc TestInvalidLogLevels(t *testing.T) {\n\tvar invalidMin uint8 = 123\n\tvar invalidMax uint8 = 124\n\tminMaxConstr, errMinMax := newMinMaxConstraints(LogLevel(invalidMin), LogLevel(invalidMax))\n\n\tif errMinMax == nil || minMaxConstr != nil {\n\t\tt.Errorf(\"expected an error and a nil value for minmax constraints: min = %d, max = %d. Got: %v, %v\",\n\t\t\tinvalidMin, invalidMax, errMinMax, minMaxConstr)\n\t\treturn\n\t}\n\n\tinvalidList := []LogLevel{145}\n\n\tlistConstr, errList := newListConstraints(invalidList)\n\n\tif errList == nil || listConstr != nil {\n\t\tt.Errorf(\"expected an error and a nil value for constraints list: %v. Got: %v, %v\",\n\t\t\tinvalidList, errList, listConstr)\n\t\treturn\n\t}\n}\n\nfunc TestlistConstraintsWithDuplicates(t *testing.T) {\n\tduplicateList := []LogLevel{TraceLvl, DebugLvl, InfoLvl,\n\t\tWarnLvl, ErrorLvl, CriticalLvl, CriticalLvl, CriticalLvl}\n\n\tlistConstr, errList := newListConstraints(duplicateList)\n\n\tif errList != nil || listConstr == nil {\n\t\tt.Errorf(\"expected a valid constraints list struct for: %v, got error: %v, value: %v\",\n\t\t\tduplicateList, errList, listConstr)\n\t\treturn\n\t}\n\n\tlistLevels := listConstr.AllowedLevels()\n\n\tif listLevels == nil {\n\t\tt.Fatalf(\"listConstr.AllowedLevels() == nil\")\n\t\treturn\n\t}\n\n\tif len(listLevels) != 6 {\n\t\tt.Errorf(\"expected: listConstr.AllowedLevels() length == 6. Got: %d\", len(listLevels))\n\t\treturn\n\t}\n}\n\nfunc TestlistConstraintsWithOffInList(t *testing.T) {\n\toffList := []LogLevel{TraceLvl, DebugLvl, Off}\n\n\tlistConstr, errList := newListConstraints(offList)\n\n\tif errList == nil || listConstr != nil {\n\t\tt.Errorf(\"expected an error and a nil value for constraints list with 'Off':  %v. 
Got: %v, %v\",\n\t\t\toffList, errList, listConstr)\n\t\treturn\n\t}\n}\n\ntype logLevelTestCase struct {\n\tlevel   LogLevel\n\tallowed bool\n}\n\nvar minMaxTests = []logLevelTestCase{\n\t{TraceLvl, false},\n\t{DebugLvl, false},\n\t{InfoLvl, true},\n\t{WarnLvl, true},\n\t{ErrorLvl, false},\n\t{CriticalLvl, false},\n\t{123, false},\n\t{6, false},\n}\n\nfunc TestValidminMaxConstraints(t *testing.T) {\n\n\tconstr, err := newMinMaxConstraints(InfoLvl, WarnLvl)\n\n\tif err != nil || constr == nil {\n\t\tt.Errorf(\"expected a valid constraints struct for minmax constraints: min = %d, max = %d. Got: %v, %v\",\n\t\t\tInfoLvl, WarnLvl, err, constr)\n\t\treturn\n\t}\n\n\tfor _, minMaxTest := range minMaxTests {\n\t\tallowed := constr.IsAllowed(minMaxTest.level)\n\t\tif allowed != minMaxTest.allowed {\n\t\t\tt.Errorf(\"expected IsAllowed() = %t for level = %d. Got: %t\",\n\t\t\t\tminMaxTest.allowed, minMaxTest.level, allowed)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nvar listTests = []logLevelTestCase{\n\t{TraceLvl, true},\n\t{DebugLvl, false},\n\t{InfoLvl, true},\n\t{WarnLvl, true},\n\t{ErrorLvl, false},\n\t{CriticalLvl, true},\n\t{123, false},\n\t{6, false},\n}\n\nfunc TestValidlistConstraints(t *testing.T) {\n\tvalidList := []LogLevel{TraceLvl, InfoLvl, WarnLvl, CriticalLvl}\n\tconstr, err := newListConstraints(validList)\n\n\tif err != nil || constr == nil {\n\t\tt.Errorf(\"expected a valid constraints list struct for: %v. Got error: %v, value: %v\",\n\t\t\tvalidList, err, constr)\n\t\treturn\n\t}\n\n\tfor _, minMaxTest := range listTests {\n\t\tallowed := constr.IsAllowed(minMaxTest.level)\n\t\tif allowed != minMaxTest.allowed {\n\t\t\tt.Errorf(\"expected IsAllowed() = %t for level = %d. 
Got: %t\",\n\t\t\t\tminMaxTest.allowed, minMaxTest.level, allowed)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nvar offTests = []logLevelTestCase{\n\t{TraceLvl, false},\n\t{DebugLvl, false},\n\t{InfoLvl, false},\n\t{WarnLvl, false},\n\t{ErrorLvl, false},\n\t{CriticalLvl, false},\n\t{123, false},\n\t{6, false},\n}\n\nfunc TestValidListoffConstraints(t *testing.T) {\n\tvalidList := []LogLevel{Off}\n\tconstr, err := newListConstraints(validList)\n\n\tif err != nil || constr == nil {\n\t\tt.Errorf(\"expected a valid constraints list struct for: %v. Got error: %v, value: %v\",\n\t\t\tvalidList, err, constr)\n\t\treturn\n\t}\n\n\tfor _, minMaxTest := range offTests {\n\t\tallowed := constr.IsAllowed(minMaxTest.level)\n\t\tif allowed != minMaxTest.allowed {\n\t\t\tt.Errorf(\"expected IsAllowed() = %t for level = %d. Got: %t\",\n\t\t\t\tminMaxTest.allowed, minMaxTest.level, allowed)\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/common_context.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar workingDir = \"\"\n\nfunc init() {\n\tsetWorkDir()\n}\n\nfunc setWorkDir() {\n\tworkDir, workingDirError := os.Getwd()\n\tif workingDirError != nil {\n\t\tworkingDir = string(os.PathSeparator)\n\t\treturn\n\t}\n\n\tworkingDir = workDir + string(os.PathSeparator)\n}\n\n// Represents runtime caller context\ntype LogContextInterface interface {\n\t// Caller func name\n\tFunc() string\n\t// Caller line num\n\tLine() int\n\t// Caller file short 
path\n\tShortPath() string\n\t// Caller file full path\n\tFullPath() string\n\t// Caller file name (without path)\n\tFileName() string\n\t// True if the context is correct and may be used.\n\t// If false, then an error in context evaluation occurred and\n\t// all its other data may be corrupted.\n\tIsValid() bool\n\t// Time when log func was called\n\tCallTime() time.Time\n}\n\n// Returns context of the caller\nfunc currentContext() (LogContextInterface, error) {\n\treturn specificContext(1)\n}\n\nfunc extractCallerInfo(skip int) (fullPath string, shortPath string, funcName string, lineNumber int, err error) {\n\tpc, fullPath, line, ok := runtime.Caller(skip)\n\n\tif !ok {\n\t\treturn \"\", \"\", \"\", 0, errors.New(\"error during runtime.Caller\")\n\t}\n\n\t//TODO:Currently fixes bug in weekly.2012-03-13+: Caller returns incorrect separators\n\t//Delete later\n\n\tfullPath = strings.Replace(fullPath, \"\\\\\", string(os.PathSeparator), -1)\n\tfullPath = strings.Replace(fullPath, \"/\", string(os.PathSeparator), -1)\n\n\tif strings.HasPrefix(fullPath, workingDir) {\n\t\tshortPath = fullPath[len(workingDir):]\n\t} else {\n\t\tshortPath = fullPath\n\t}\n\n\tfunName := runtime.FuncForPC(pc).Name()\n\tvar functionName string\n\tif strings.HasPrefix(funName, workingDir) {\n\t\tfunctionName = funName[len(workingDir):]\n\t} else {\n\t\tfunctionName = funName\n\t}\n\n\treturn fullPath, shortPath, functionName, line, nil\n}\n\n// Returns context of the function with placed \"skip\" stack frames of the caller\n// If skip == 0 then behaves like currentContext\n// Context is returned in any situation, even if error occurs. 
But, if an error\n// occurs, the returned context is an error context, which contains no paths\n// or names, but states that they can't be extracted.\nfunc specificContext(skip int) (LogContextInterface, error) {\n\tcallTime := time.Now()\n\n\tif skip < 0 {\n\t\tnegativeStackFrameErr := errors.New(\"can not skip negative stack frames\")\n\t\treturn &errorContext{callTime, negativeStackFrameErr}, negativeStackFrameErr\n\t}\n\n\tfullPath, shortPath, function, line, err := extractCallerInfo(skip + 2)\n\tif err != nil {\n\t\treturn &errorContext{callTime, err}, err\n\t}\n\t_, fileName := filepath.Split(fullPath)\n\treturn &logContext{function, line, shortPath, fullPath, fileName, callTime}, nil\n}\n\n// Represents a normal runtime caller context\ntype logContext struct {\n\tfuncName  string\n\tline      int\n\tshortPath string\n\tfullPath  string\n\tfileName  string\n\tcallTime  time.Time\n}\n\nfunc (context *logContext) IsValid() bool {\n\treturn true\n}\n\nfunc (context *logContext) Func() string {\n\treturn context.funcName\n}\n\nfunc (context *logContext) Line() int {\n\treturn context.line\n}\n\nfunc (context *logContext) ShortPath() string {\n\treturn context.shortPath\n}\n\nfunc (context *logContext) FullPath() string {\n\treturn context.fullPath\n}\n\nfunc (context *logContext) FileName() string {\n\treturn context.fileName\n}\n\nfunc (context *logContext) CallTime() time.Time {\n\treturn context.callTime\n}\n\nconst (\n\terrorContextFunc      = \"Func() error:\"\n\terrorContextShortPath = \"ShortPath() error:\"\n\terrorContextFullPath  = \"FullPath() error:\"\n\terrorContextFileName  = \"FileName() error:\"\n)\n\n// Represents an error context\ntype errorContext struct {\n\terrorTime time.Time\n\terr       error\n}\n\nfunc (errContext *errorContext) IsValid() bool {\n\treturn false\n}\n\nfunc (errContext *errorContext) Line() int {\n\treturn -1\n}\n\nfunc (errContext *errorContext) Func() string {\n\treturn errorContextFunc + errContext.err.Error()\n}\n\nfunc 
(errContext *errorContext) ShortPath() string {\n\treturn errorContextShortPath + errContext.err.Error()\n}\n\nfunc (errContext *errorContext) FullPath() string {\n\treturn errorContextFullPath + errContext.err.Error()\n}\n\nfunc (errContext *errorContext) FileName() string {\n\treturn errorContextFileName + errContext.err.Error()\n}\n\nfunc (errContext *errorContext) CallTime() time.Time {\n\treturn errContext.errorTime\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/common_context_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"testing\"\n)\n\nconst (\n\tshortPath = \"common_context_test.go\"\n)\n\nfunc init() {\n\t// Here we remove the hardcoding of the package name which breaks forks and some CI environments\n\t// such as jenkins\n\t_, _, funcName, _, _ := extractCallerInfo(1)\n\tcommonPrefix = funcName[:strings.Index(funcName, \"init·\")]\n}\n\nvar commonPrefix string\nvar testFullPath string\n\nfunc fullPath(t *testing.T) string {\n\tif testFullPath == \"\" {\n\t\twd, err := os.Getwd()\n\n\t\tif err != nil 
{\n\t\t\tt.Fatalf(\"Cannot get working directory: %s\", err.Error())\n\t\t}\n\n\t\ttestFullPath = filepath.Join(wd, shortPath)\n\t}\n\n\treturn testFullPath\n}\n\nfunc TestContext(t *testing.T) {\n\n\tcontext, err := currentContext()\n\n\tnameFunc := commonPrefix + \"TestContext\"\n\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %s\", err.Error())\n\t}\n\n\tif context == nil {\n\t\tt.Fatalf(\"Expected: context != nil\")\n\t}\n\n\tif nf := context.Func(); nf != nameFunc {\n\t\t// Account for a case when the func full path is bigger than commonPrefix but includes it.\n\t\tif !strings.HasSuffix(nf, nameFunc) {\n\t\t\tt.Errorf(\"expected context.Func == %s ; got %s\", nameFunc, context.Func())\n\t\t}\n\t}\n\n\tif context.ShortPath() != shortPath {\n\t\tt.Errorf(\"expected context.ShortPath == %s ; got %s\", shortPath, context.ShortPath())\n\t}\n\n\tfp := fullPath(t)\n\n\tif context.FullPath() != fp {\n\t\tt.Errorf(\"expected context.FullPath == %s ; got %s\", fp, context.FullPath())\n\t}\n}\n\nfunc innerContext() (context LogContextInterface, err error) {\n\treturn currentContext()\n}\n\nfunc TestInnerContext(t *testing.T) {\n\tcontext, err := innerContext()\n\n\tnameFunc := commonPrefix + \"innerContext\"\n\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %s\", err.Error())\n\t}\n\n\tif context == nil {\n\t\tt.Fatalf(\"Expected: context != nil\")\n\t}\n\n\tif cf := context.Func(); cf != nameFunc {\n\t\t// Account for a case when the func full path is bigger than commonPrefix but includes it.\n\t\tif !strings.HasSuffix(cf, nameFunc) {\n\t\t\tt.Errorf(\"expected context.Func == %s ; got %s\", nameFunc, context.Func())\n\t\t}\n\t}\n\n\tif context.ShortPath() != shortPath {\n\t\tt.Errorf(\"expected context.ShortPath == %s ; got %s\", shortPath, context.ShortPath())\n\t}\n\n\tfp := fullPath(t)\n\n\tif context.FullPath() != fp {\n\t\tt.Errorf(\"expected context.FullPath == %s ; got %s\", fp, context.FullPath())\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/common_exception.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n// Used in rules creation to validate input file and func filters\nvar (\n\tfileFormatValidator = regexp.MustCompile(`[a-zA-Z0-9\\\\/ _\\*\\.]*`)\n\tfuncFormatValidator = regexp.MustCompile(`[a-zA-Z0-9_\\*\\.]*`)\n)\n\n// logLevelException represents an exceptional case used when you need some specific files or funcs to\n// override general constraints and to use their own.\ntype logLevelException struct {\n\tfuncPatternParts []string\n\tfilePatternParts []string\n\n\tfuncPattern 
string\n\tfilePattern string\n\n\tconstraints logLevelConstraints\n}\n\n// newLogLevelException creates a new exception.\nfunc newLogLevelException(funcPattern string, filePattern string, constraints logLevelConstraints) (*logLevelException, error) {\n\tif constraints == nil {\n\t\treturn nil, errors.New(\"constraints can not be nil\")\n\t}\n\n\texception := new(logLevelException)\n\n\terr := exception.initFuncPatternParts(funcPattern)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texception.funcPattern = strings.Join(exception.funcPatternParts, \"\")\n\n\terr = exception.initFilePatternParts(filePattern)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texception.filePattern = strings.Join(exception.filePatternParts, \"\")\n\n\texception.constraints = constraints\n\n\treturn exception, nil\n}\n\n// MatchesContext returns true if context matches the patterns of this logLevelException\nfunc (logLevelEx *logLevelException) MatchesContext(context LogContextInterface) bool {\n\treturn logLevelEx.match(context.Func(), context.FullPath())\n}\n\n// IsAllowed returns true if log level is allowed according to the constraints of this logLevelException\nfunc (logLevelEx *logLevelException) IsAllowed(level LogLevel) bool {\n\treturn logLevelEx.constraints.IsAllowed(level)\n}\n\n// FuncPattern returns the function pattern of a exception\nfunc (logLevelEx *logLevelException) FuncPattern() string {\n\treturn logLevelEx.funcPattern\n}\n\n// FuncPattern returns the file pattern of a exception\nfunc (logLevelEx *logLevelException) FilePattern() string {\n\treturn logLevelEx.filePattern\n}\n\n// initFuncPatternParts checks whether the func filter has a correct format and splits funcPattern on parts\nfunc (logLevelEx *logLevelException) initFuncPatternParts(funcPattern string) (err error) {\n\n\tif funcFormatValidator.FindString(funcPattern) != funcPattern {\n\t\treturn errors.New(\"func path \\\"\" + funcPattern + \"\\\" contains incorrect symbols. Only a-z A-Z 0-9 _ * . 
allowed)\")\n\t}\n\n\tlogLevelEx.funcPatternParts = splitPattern(funcPattern)\n\treturn nil\n}\n\n// Checks whether the file filter has a correct format and splits file patterns using splitPattern.\nfunc (logLevelEx *logLevelException) initFilePatternParts(filePattern string) (err error) {\n\n\tif fileFormatValidator.FindString(filePattern) != filePattern {\n\t\treturn errors.New(\"file path \\\"\" + filePattern + \"\\\" contains incorrect symbols. Only a-z A-Z 0-9 \\\\ / _ * . allowed)\")\n\t}\n\n\tlogLevelEx.filePatternParts = splitPattern(filePattern)\n\treturn err\n}\n\nfunc (logLevelEx *logLevelException) match(funcPath string, filePath string) bool {\n\tif !stringMatchesPattern(logLevelEx.funcPatternParts, funcPath) {\n\t\treturn false\n\t}\n\treturn stringMatchesPattern(logLevelEx.filePatternParts, filePath)\n}\n\nfunc (logLevelEx *logLevelException) String() string {\n\tstr := fmt.Sprintf(\"Func: %s File: %s \", logLevelEx.funcPattern, logLevelEx.filePattern)\n\n\tif logLevelEx.constraints != nil {\n\t\tstr += fmt.Sprintf(\"Constr: %s\", logLevelEx.constraints)\n\t} else {\n\t\tstr += \"nil\"\n\t}\n\n\treturn str\n}\n\n// splitPattern splits pattern into strings and asterisks. 
Example: \"ab*cde**f\" -> [\"ab\", \"*\", \"cde\", \"*\", \"f\"]\nfunc splitPattern(pattern string) []string {\n\tvar patternParts []string\n\tvar lastChar rune\n\tfor _, char := range pattern {\n\t\tif char == '*' {\n\t\t\tif lastChar != '*' {\n\t\t\t\tpatternParts = append(patternParts, \"*\")\n\t\t\t}\n\t\t} else {\n\t\t\tif len(patternParts) != 0 && lastChar != '*' {\n\t\t\t\tpatternParts[len(patternParts)-1] += string(char)\n\t\t\t} else {\n\t\t\t\tpatternParts = append(patternParts, string(char))\n\t\t\t}\n\t\t}\n\t\tlastChar = char\n\t}\n\n\treturn patternParts\n}\n\n// stringMatchesPattern check whether testString matches pattern with asterisks.\n// Standard regexp functionality is not used here because of performance issues.\nfunc stringMatchesPattern(patternparts []string, testString string) bool {\n\tif len(patternparts) == 0 {\n\t\treturn len(testString) == 0\n\t}\n\n\tpart := patternparts[0]\n\tif part != \"*\" {\n\t\tindex := strings.Index(testString, part)\n\t\tif index == 0 {\n\t\t\treturn stringMatchesPattern(patternparts[1:], testString[len(part):])\n\t\t}\n\t} else {\n\t\tif len(patternparts) == 1 {\n\t\t\treturn true\n\t\t}\n\n\t\tnewTestString := testString\n\t\tpart = patternparts[1]\n\t\tfor {\n\t\t\tindex := strings.Index(newTestString, part)\n\t\t\tif index == -1 {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tnewTestString = newTestString[index+len(part):]\n\t\t\tresult := stringMatchesPattern(patternparts[2:], newTestString)\n\t\t\tif result {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/common_exception_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"testing\"\n)\n\ntype exceptionTestCase struct {\n\tfuncPattern string\n\tfilePattern string\n\tfuncName    string\n\tfileName    string\n\tmatch       bool\n}\n\nvar exceptionTestCases = []exceptionTestCase{\n\t{\"*\", \"*\", \"func\", \"file\", true},\n\t{\"func*\", \"*\", \"func\", \"file\", true},\n\t{\"*func\", \"*\", \"func\", \"file\", true},\n\t{\"*func\", \"*\", \"1func\", \"file\", true},\n\t{\"func*\", \"*\", \"func1\", \"file\", true},\n\t{\"fu*nc\", \"*\", \"func\", \"file\", true},\n\t{\"fu*nc\", \"*\", \"fu1nc\", \"file\", 
true},\n\t{\"fu*nc\", \"*\", \"func1nc\", \"file\", true},\n\t{\"*fu*nc*\", \"*\", \"somefuntonc\", \"file\", true},\n\t{\"fu*nc\", \"*\", \"f1nc\", \"file\", false},\n\t{\"func*\", \"*\", \"fun\", \"file\", false},\n\t{\"fu*nc\", \"*\", \"func1n\", \"file\", false},\n\t{\"**f**u**n**c**\", \"*\", \"func1n\", \"file\", true},\n}\n\nfunc TestMatchingCorrectness(t *testing.T) {\n\tconstraints, err := newListConstraints([]LogLevel{TraceLvl})\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tfor _, testCase := range exceptionTestCases {\n\t\trule, ruleError := newLogLevelException(testCase.funcPattern, testCase.filePattern, constraints)\n\t\tif ruleError != nil {\n\t\t\tt.Fatalf(\"Unexpected error on rule creation: [ %v, %v ]. %v\",\n\t\t\t\ttestCase.funcPattern, testCase.filePattern, ruleError)\n\t\t}\n\n\t\tmatch := rule.match(testCase.funcName, testCase.fileName)\n\t\tif match != testCase.match {\n\t\t\tt.Errorf(\"incorrect matching for [ %v, %v ] [ %v, %v ] Expected: %t. Got: %t\",\n\t\t\t\ttestCase.funcPattern, testCase.filePattern, testCase.funcName, testCase.fileName, testCase.match, match)\n\t\t}\n\t}\n}\n\nfunc TestAsterisksReducing(t *testing.T) {\n\tconstraints, err := newListConstraints([]LogLevel{TraceLvl})\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\trule, err := newLogLevelException(\"***func**\", \"fi*****le\", constraints)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\texpectFunc := \"*func*\"\n\tif rule.FuncPattern() != expectFunc {\n\t\tt.Errorf(\"asterisks must be reduced. Expect:%v, Got:%v\", expectFunc, rule.FuncPattern())\n\t}\n\n\texpectFile := \"fi*le\"\n\tif rule.FilePattern() != expectFile {\n\t\tt.Errorf(\"asterisks must be reduced. Expect:%v, Got:%v\", expectFile, rule.FilePattern())\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/common_flusher.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\n// flusherInterface represents all objects that have to do cleanup\n// at certain moments of time (e.g. before app shutdown to avoid data loss)\ntype flusherInterface interface {\n\tFlush()\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/common_loglevel.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\n// Log level type\ntype LogLevel uint8\n\n// Log levels\nconst (\n\tTraceLvl = iota\n\tDebugLvl\n\tInfoLvl\n\tWarnLvl\n\tErrorLvl\n\tCriticalLvl\n\tOff\n)\n\n// Log level string representations (used in configuration files)\nconst (\n\tTraceStr    = \"trace\"\n\tDebugStr    = \"debug\"\n\tInfoStr     = \"info\"\n\tWarnStr     = \"warn\"\n\tErrorStr    = \"error\"\n\tCriticalStr = \"critical\"\n\tOffStr      = \"off\"\n)\n\nvar levelToStringRepresentations = map[LogLevel]string{\n\tTraceLvl:    TraceStr,\n\tDebugLvl:    DebugStr,\n\tInfoLvl:     InfoStr,\n\tWarnLvl:     WarnStr,\n\tErrorLvl:    ErrorStr,\n\tCriticalLvl: CriticalStr,\n\tOff:         OffStr,\n}\n\n// LogLevelFromString parses a string and returns a corresponding log level, if successful.\nfunc LogLevelFromString(levelStr string) (level LogLevel, found bool) {\n\tfor lvl, lvlStr := range levelToStringRepresentations {\n\t\tif lvlStr == levelStr {\n\t\t\treturn lvl, true\n\t\t}\n\t}\n\n\treturn 0, false\n}\n\n// LogLevelToString returns seelog string representation for a specified level. Returns \"\" for invalid log levels.\nfunc (level LogLevel) String() string {\n\tlevelStr, ok := levelToStringRepresentations[level]\n\tif ok {\n\t\treturn levelStr\n\t}\n\n\treturn \"\"\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/dispatch_custom.go",
    "content": "// Copyright (c) 2013 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"sort\"\n)\n\nvar registeredReceivers = make(map[string]reflect.Type)\n\n// RegisterReceiver records a custom receiver type, identified by a value\n// of that type (second argument), under the specified name. Registered\n// names can be used in the \"name\" attribute of <custom> config items.\n//\n// RegisterReceiver takes the type of the receiver argument, without taking\n// the value into the account. 
So do NOT enter any data to the second argument\n// and only call it like:\n//     RegisterReceiver(\"somename\", &MyReceiverType{})\n//\n// After that, when a '<custom>' config tag with this name is used,\n// a receiver of the specified type would be instantiated. Check\n// CustomReceiver comments for interface details.\n//\n// NOTE 1: RegisterReceiver fails if you attempt to register different types\n// with the same name.\n//\n// NOTE 2: RegisterReceiver registers those receivers that must be used in\n// the configuration files (<custom> items). Basically it is just the way\n// you tell seelog config parser what should it do when it meets a\n// <custom> tag with a specific name and data attributes.\n//\n// But If you are only using seelog as a proxy to an already instantiated\n// CustomReceiver (via LoggerFromCustomReceiver func), you should not call RegisterReceiver.\nfunc RegisterReceiver(name string, receiver CustomReceiver) {\n\tnewType := reflect.TypeOf(reflect.ValueOf(receiver).Elem().Interface())\n\tif t, ok := registeredReceivers[name]; ok && t != newType {\n\t\tpanic(fmt.Sprintf(\"duplicate types for %s: %s != %s\", name, t, newType))\n\t}\n\tregisteredReceivers[name] = newType\n}\n\nfunc customReceiverByName(name string) (creceiver CustomReceiver, err error) {\n\trt, ok := registeredReceivers[name]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"custom receiver name not registered: '%s'\", name)\n\t}\n\tv, ok := reflect.New(rt).Interface().(CustomReceiver)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"cannot instantiate receiver with name='%s'\", name)\n\t}\n\treturn v, nil\n}\n\n// CustomReceiverInitArgs represent arguments passed to the CustomReceiver.Init\n// func when custom receiver is being initialized.\ntype CustomReceiverInitArgs struct {\n\t// XmlCustomAttrs represent '<custom>' xml config item attributes that\n\t// start with \"data-\". 
Map keys will be the attribute names without the \"data-\".\n\t// Map values will the those attribute values.\n\t//\n\t// E.g. if you have a '<custom name=\"somename\" data-attr1=\"a1\" data-attr2=\"a2\"/>'\n\t// you will get map with 2 key-value pairs: \"attr1\"->\"a1\", \"attr2\"->\"a2\"\n\t//\n\t// Note that in custom items you can only use allowed attributes, like \"name\" and\n\t// your custom attributes, starting with \"data-\". Any other will lead to a\n\t// parsing error.\n\tXmlCustomAttrs map[string]string\n}\n\n// CustomReceiver is the interface that external custom seelog message receivers\n// must implement in order to be able to process seelog messages. Those receivers\n// are set in the xml config file using the <custom> tag. Check receivers reference\n// wiki section on that.\n//\n// Use seelog.RegisterReceiver on the receiver type before using it.\ntype CustomReceiver interface {\n\t// ReceiveMessage is called when the custom receiver gets seelog message from\n\t// a parent dispatcher.\n\t//\n\t// Message, level and context args represent all data that was included in the seelog\n\t// message at the time it was logged.\n\t//\n\t// The formatting is already applied to the message and depends on the config\n\t// like with any other receiver.\n\t//\n\t// If you would like to inform seelog of an error that happened during the handling of\n\t// the message, return a non-nil error. This way you'll end up seeing your error like\n\t// any other internal seelog error.\n\tReceiveMessage(message string, level LogLevel, context LogContextInterface) error\n\n\t// AfterParse is called immediately after your custom receiver is instantiated by\n\t// the xml config parser. So, if you need to do any startup logic after config parsing,\n\t// like opening file or allocating any resources after the receiver is instantiated, do it here.\n\t//\n\t// If this func returns a non-nil error, then the loading procedure will fail. 
E.g.\n\t// if you are loading a seelog xml config, the parser would not finish the loading\n\t// procedure and inform about an error like with any other config error.\n\t//\n\t// If your custom logger needs some configuration, you can use custom attributes in\n\t// your config. Check CustomReceiverInitArgs.XmlCustomAttrs comments.\n\t//\n\t// IMPORTANT: This func is NOT called when the LoggerFromCustomReceiver func is used\n\t// to create seelog proxy logger using the custom receiver. This func is only called when\n\t// receiver is instantiated from a config.\n\tAfterParse(initArgs CustomReceiverInitArgs) error\n\n\t// Flush is called when the custom receiver gets a 'flush' directive from a\n\t// parent receiver. If custom receiver implements some kind of buffering or\n\t// queing, then the appropriate reaction on a flush message is synchronous\n\t// flushing of all those queues/buffers. If custom receiver doesn't have\n\t// such mechanisms, then flush implementation may be left empty.\n\tFlush()\n\n\t// Close is called when the custom receiver gets a 'close' directive from a\n\t// parent receiver. 
This happens when a top-level seelog dispatcher is sending\n\t// 'close' to all child nodes and it means that current seelog logger is being closed.\n\t// If you need to do any cleanup after your custom receiver is done, you should do\n\t// it here.\n\tClose() error\n}\n\ntype customReceiverDispatcher struct {\n\tformatter          *formatter\n\tinnerReceiver      CustomReceiver\n\tcustomReceiverName string\n\tusedArgs           CustomReceiverInitArgs\n}\n\n// newCustomReceiverDispatcher creates a customReceiverDispatcher which dispatches data to a specific receiver created\n// using a <custom> tag in the config file.\nfunc newCustomReceiverDispatcher(formatter *formatter, customReceiverName string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) {\n\tif formatter == nil {\n\t\treturn nil, errors.New(\"formatter cannot be nil\")\n\t}\n\tif len(customReceiverName) == 0 {\n\t\treturn nil, errors.New(\"custom receiver name cannot be empty\")\n\t}\n\n\tcreceiver, err := customReceiverByName(customReceiverName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = creceiver.AfterParse(cArgs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdisp := &customReceiverDispatcher{formatter, creceiver, customReceiverName, cArgs}\n\n\treturn disp, nil\n}\n\n// newCustomReceiverDispatcherByValue is basically the same as newCustomReceiverDispatcher, but using\n// a specific CustomReceiver value instead of instantiating a new one by type.\nfunc newCustomReceiverDispatcherByValue(formatter *formatter, customReceiver CustomReceiver, name string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) {\n\tif formatter == nil {\n\t\treturn nil, errors.New(\"formatter cannot be nil\")\n\t}\n\tif customReceiver == nil {\n\t\treturn nil, errors.New(\"customReceiver cannot be nil\")\n\t}\n\tdisp := &customReceiverDispatcher{formatter, customReceiver, name, cArgs}\n\n\treturn disp, nil\n}\n\n// CustomReceiver implementation. 
Check CustomReceiver comments.\nfunc (disp *customReceiverDispatcher) Dispatch(\n\tmessage string,\n\tlevel LogLevel,\n\tcontext LogContextInterface,\n\terrorFunc func(err error)) {\n\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\terrorFunc(fmt.Errorf(\"panic in custom receiver '%s'.Dispatch: %s\", reflect.TypeOf(disp.innerReceiver), err))\n\t\t}\n\t}()\n\n\terr := disp.innerReceiver.ReceiveMessage(disp.formatter.Format(message, level, context), level, context)\n\tif err != nil {\n\t\terrorFunc(err)\n\t}\n}\n\n// CustomReceiver implementation. Check CustomReceiver comments.\nfunc (disp *customReceiverDispatcher) Flush() {\n\tdisp.innerReceiver.Flush()\n}\n\n// CustomReceiver implementation. Check CustomReceiver comments.\nfunc (disp *customReceiverDispatcher) Close() error {\n\tdisp.innerReceiver.Flush()\n\n\terr := disp.innerReceiver.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (disp *customReceiverDispatcher) String() string {\n\tdatas := \"\"\n\tskeys := make([]string, 0, len(disp.usedArgs.XmlCustomAttrs))\n\tfor i := range disp.usedArgs.XmlCustomAttrs {\n\t\tskeys = append(skeys, i)\n\t}\n\tsort.Strings(skeys)\n\tfor _, key := range skeys {\n\t\tdatas += fmt.Sprintf(\"<%s, %s> \", key, disp.usedArgs.XmlCustomAttrs[key])\n\t}\n\n\tstr := fmt.Sprintf(\"Custom receiver %s [fmt='%s'],[data='%s'],[inner='%s']\\n\",\n\t\tdisp.customReceiverName, disp.formatter.String(), datas, disp.innerReceiver)\n\n\treturn str\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/dispatch_customdispatcher_test.go",
    "content": "// Copyright (c) 2013 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"testing\"\n)\n\ntype testCustomDispatcherMessageReceiver struct {\n\tcustomTestReceiver\n}\n\nfunc TestCustomDispatcher_Message(t *testing.T) {\n\trecName := \"TestCustomDispatcher_Message\"\n\tRegisterReceiver(recName, &testCustomDispatcherMessageReceiver{})\n\n\tcustomDispatcher, err := newCustomReceiverDispatcher(onlyMessageFormatForTest, recName, CustomReceiverInitArgs{\n\t\tXmlCustomAttrs: map[string]string{\n\t\t\t\"test\": \"testdata\",\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tcontext, err := 
currentContext()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tbytes := []byte(\"Hello\")\n\tcustomDispatcher.Dispatch(string(bytes), TraceLvl, context, func(err error) {})\n\n\tcout := customDispatcher.innerReceiver.(*testCustomDispatcherMessageReceiver).customTestReceiver.co\n\tif cout.initCalled != true {\n\t\tt.Error(\"Init not called\")\n\t\treturn\n\t}\n\tif cout.dataPassed != \"testdata\" {\n\t\tt.Errorf(\"wrong data passed: '%s'\", cout.dataPassed)\n\t\treturn\n\t}\n\tif cout.messageOutput != string(bytes) {\n\t\tt.Errorf(\"wrong message output: '%s'\", cout.messageOutput)\n\t\treturn\n\t}\n\tif cout.levelOutput != TraceLvl {\n\t\tt.Errorf(\"wrong log level: '%s'\", cout.levelOutput)\n\t\treturn\n\t}\n\tif cout.flushed {\n\t\tt.Error(\"Flush was not expected\")\n\t\treturn\n\t}\n\tif cout.closed {\n\t\tt.Error(\"Closing was not expected\")\n\t\treturn\n\t}\n}\n\ntype testCustomDispatcherFlushReceiver struct {\n\tcustomTestReceiver\n}\n\nfunc TestCustomDispatcher_Flush(t *testing.T) {\n\trecName := \"TestCustomDispatcher_Flush\"\n\tRegisterReceiver(recName, &testCustomDispatcherFlushReceiver{})\n\n\tcustomDispatcher, err := newCustomReceiverDispatcher(onlyMessageFormatForTest, recName, CustomReceiverInitArgs{\n\t\tXmlCustomAttrs: map[string]string{\n\t\t\t\"test\": \"testdata\",\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tcustomDispatcher.Flush()\n\n\tcout := customDispatcher.innerReceiver.(*testCustomDispatcherFlushReceiver).customTestReceiver.co\n\tif cout.initCalled != true {\n\t\tt.Error(\"Init not called\")\n\t\treturn\n\t}\n\tif cout.dataPassed != \"testdata\" {\n\t\tt.Errorf(\"wrong data passed: '%s'\", cout.dataPassed)\n\t\treturn\n\t}\n\tif cout.messageOutput != \"\" {\n\t\tt.Errorf(\"wrong message output: '%s'\", cout.messageOutput)\n\t\treturn\n\t}\n\tif cout.levelOutput != TraceLvl {\n\t\tt.Errorf(\"wrong log level: '%s'\", cout.levelOutput)\n\t\treturn\n\t}\n\tif !cout.flushed {\n\t\tt.Error(\"Flush was 
expected\")\n\t\treturn\n\t}\n\tif cout.closed {\n\t\tt.Error(\"Closing was not expected\")\n\t\treturn\n\t}\n}\n\ntype testCustomDispatcherCloseReceiver struct {\n\tcustomTestReceiver\n}\n\nfunc TestCustomDispatcher_Close(t *testing.T) {\n\trecName := \"TestCustomDispatcher_Close\"\n\tRegisterReceiver(recName, &testCustomDispatcherCloseReceiver{})\n\n\tcustomDispatcher, err := newCustomReceiverDispatcher(onlyMessageFormatForTest, recName, CustomReceiverInitArgs{\n\t\tXmlCustomAttrs: map[string]string{\n\t\t\t\"test\": \"testdata\",\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tcustomDispatcher.Close()\n\n\tcout := customDispatcher.innerReceiver.(*testCustomDispatcherCloseReceiver).customTestReceiver.co\n\tif cout.initCalled != true {\n\t\tt.Error(\"Init not called\")\n\t\treturn\n\t}\n\tif cout.dataPassed != \"testdata\" {\n\t\tt.Errorf(\"wrong data passed: '%s'\", cout.dataPassed)\n\t\treturn\n\t}\n\tif cout.messageOutput != \"\" {\n\t\tt.Errorf(\"wrong message output: '%s'\", cout.messageOutput)\n\t\treturn\n\t}\n\tif cout.levelOutput != TraceLvl {\n\t\tt.Errorf(\"wrong log level: '%s'\", cout.levelOutput)\n\t\treturn\n\t}\n\tif !cout.flushed {\n\t\tt.Error(\"Flush was expected\")\n\t\treturn\n\t}\n\tif !cout.closed {\n\t\tt.Error(\"Closing was expected\")\n\t\treturn\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/dispatch_dispatcher.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n)\n\n// A dispatcherInterface is used to dispatch message to all underlying receivers.\n// Dispatch logic depends on given context and log level. 
Any errors are reported using errorFunc.\n// Also, as underlying receivers may have a state, dispatcher has a ShuttingDown method which performs\n// an immediate cleanup of all data that is stored in the receivers\ntype dispatcherInterface interface {\n\tflusherInterface\n\tio.Closer\n\tDispatch(message string, level LogLevel, context LogContextInterface, errorFunc func(err error))\n}\n\ntype dispatcher struct {\n\tformatter   *formatter\n\twriters     []*formattedWriter\n\tdispatchers []dispatcherInterface\n}\n\n// Creates a dispatcher which dispatches data to a list of receivers.\n// Each receiver should be either a Dispatcher or io.Writer, otherwise an error will be returned\nfunc createDispatcher(formatter *formatter, receivers []interface{}) (*dispatcher, error) {\n\tif formatter == nil {\n\t\treturn nil, errors.New(\"formatter cannot be nil\")\n\t}\n\tif receivers == nil || len(receivers) == 0 {\n\t\treturn nil, errors.New(\"receivers cannot be nil or empty\")\n\t}\n\n\tdisp := &dispatcher{formatter, make([]*formattedWriter, 0), make([]dispatcherInterface, 0)}\n\tfor _, receiver := range receivers {\n\t\twriter, ok := receiver.(*formattedWriter)\n\t\tif ok {\n\t\t\tdisp.writers = append(disp.writers, writer)\n\t\t\tcontinue\n\t\t}\n\n\t\tioWriter, ok := receiver.(io.Writer)\n\t\tif ok {\n\t\t\twriter, err := newFormattedWriter(ioWriter, disp.formatter)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdisp.writers = append(disp.writers, writer)\n\t\t\tcontinue\n\t\t}\n\n\t\tdispInterface, ok := receiver.(dispatcherInterface)\n\t\tif ok {\n\t\t\tdisp.dispatchers = append(disp.dispatchers, dispInterface)\n\t\t\tcontinue\n\t\t}\n\n\t\treturn nil, errors.New(\"method can receive either io.Writer or dispatcherInterface\")\n\t}\n\n\treturn disp, nil\n}\n\nfunc (disp *dispatcher) Dispatch(\n\tmessage string,\n\tlevel LogLevel,\n\tcontext LogContextInterface,\n\terrorFunc func(err error)) {\n\n\tfor _, writer := range disp.writers {\n\t\terr := 
writer.Write(message, level, context)\n\t\tif err != nil {\n\t\t\terrorFunc(err)\n\t\t}\n\t}\n\n\tfor _, dispInterface := range disp.dispatchers {\n\t\tdispInterface.Dispatch(message, level, context, errorFunc)\n\t}\n}\n\n// Flush goes through all underlying writers which implement flusherInterface interface\n// and closes them. Recursively performs the same action for underlying dispatchers\nfunc (disp *dispatcher) Flush() {\n\tfor _, disp := range disp.Dispatchers() {\n\t\tdisp.Flush()\n\t}\n\n\tfor _, formatWriter := range disp.Writers() {\n\t\tflusher, ok := formatWriter.Writer().(flusherInterface)\n\t\tif ok {\n\t\t\tflusher.Flush()\n\t\t}\n\t}\n}\n\n// Close goes through all underlying writers which implement io.Closer interface\n// and closes them. Recursively performs the same action for underlying dispatchers\n// Before closing, writers are flushed to prevent loss of any buffered data, so\n// a call to Flush() func before Close() is not necessary\nfunc (disp *dispatcher) Close() error {\n\tfor _, disp := range disp.Dispatchers() {\n\t\tdisp.Flush()\n\t\terr := disp.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, formatWriter := range disp.Writers() {\n\t\tflusher, ok := formatWriter.Writer().(flusherInterface)\n\t\tif ok {\n\t\t\tflusher.Flush()\n\t\t}\n\n\t\tcloser, ok := formatWriter.Writer().(io.Closer)\n\t\tif ok {\n\t\t\terr := closer.Close()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (disp *dispatcher) Writers() []*formattedWriter {\n\treturn disp.writers\n}\n\nfunc (disp *dispatcher) Dispatchers() []dispatcherInterface {\n\treturn disp.dispatchers\n}\n\nfunc (disp *dispatcher) String() string {\n\tstr := \"formatter: \" + disp.formatter.String() + \"\\n\"\n\n\tstr += \"    ->Dispatchers:\"\n\n\tif len(disp.dispatchers) == 0 {\n\t\tstr += \"none\\n\"\n\t} else {\n\t\tstr += \"\\n\"\n\n\t\tfor _, disp := range disp.dispatchers {\n\t\t\tstr += fmt.Sprintf(\"        ->%s\", 
disp)\n\t\t}\n\t}\n\n\tstr += \"    ->Writers:\"\n\n\tif len(disp.writers) == 0 {\n\t\tstr += \"none\\n\"\n\t} else {\n\t\tstr += \"\\n\"\n\n\t\tfor _, writer := range disp.writers {\n\t\t\tstr += fmt.Sprintf(\"        ->%s\\n\", writer)\n\t\t}\n\t}\n\n\treturn str\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/dispatch_filterdispatcher.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"fmt\"\n)\n\n// A filterDispatcher writes the given message to underlying receivers only if message log level\n// is in the allowed list.\ntype filterDispatcher struct {\n\t*dispatcher\n\tallowList map[LogLevel]bool\n}\n\n// newFilterDispatcher creates a new filterDispatcher using a list of allowed levels.\nfunc newFilterDispatcher(formatter *formatter, receivers []interface{}, allowList ...LogLevel) (*filterDispatcher, error) {\n\tdisp, err := createDispatcher(formatter, receivers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tallows 
:= make(map[LogLevel]bool)\n\tfor _, allowLevel := range allowList {\n\t\tallows[allowLevel] = true\n\t}\n\n\treturn &filterDispatcher{disp, allows}, nil\n}\n\nfunc (filter *filterDispatcher) Dispatch(\n\tmessage string,\n\tlevel LogLevel,\n\tcontext LogContextInterface,\n\terrorFunc func(err error)) {\n\tisAllowed, ok := filter.allowList[level]\n\tif ok && isAllowed {\n\t\tfilter.dispatcher.Dispatch(message, level, context, errorFunc)\n\t}\n}\n\nfunc (filter *filterDispatcher) String() string {\n\treturn fmt.Sprintf(\"filterDispatcher ->\\n%s\", filter.dispatcher)\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/dispatch_filterdispatcher_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"testing\"\n)\n\nfunc TestfilterDispatcher_Pass(t *testing.T) {\n\twriter, _ := newBytesVerifier(t)\n\tfilter, err := newFilterDispatcher(onlyMessageFormatForTest, []interface{}{writer}, TraceLvl)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tcontext, err := currentContext()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tbytes := []byte(\"Hello\")\n\twriter.ExpectBytes(bytes)\n\tfilter.Dispatch(string(bytes), TraceLvl, context, func(err error) {})\n\twriter.MustNotExpect()\n}\n\nfunc TestfilterDispatcher_Deny(t 
*testing.T) {\n\twriter, _ := newBytesVerifier(t)\n\tfilter, err := newFilterDispatcher(defaultformatter, []interface{}{writer})\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tcontext, err := currentContext()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tbytes := []byte(\"Hello\")\n\tfilter.Dispatch(string(bytes), TraceLvl, context, func(err error) {})\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/dispatch_splitdispatcher.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"fmt\"\n)\n\n// A splitDispatcher just writes the given message to underlying receivers. 
(Splits the message stream.)\ntype splitDispatcher struct {\n\t*dispatcher\n}\n\nfunc newSplitDispatcher(formatter *formatter, receivers []interface{}) (*splitDispatcher, error) {\n\tdisp, err := createDispatcher(formatter, receivers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &splitDispatcher{disp}, nil\n}\n\nfunc (splitter *splitDispatcher) String() string {\n\treturn fmt.Sprintf(\"splitDispatcher ->\\n%s\", splitter.dispatcher.String())\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/dispatch_splitdispatcher_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nvar onlyMessageFormatForTest *formatter\n\nfunc init() {\n\tvar err error\n\tonlyMessageFormatForTest, err = newFormatter(\"%Msg\")\n\tif err != nil {\n\t\tfmt.Println(\"Can not create only message format: \" + err.Error())\n\t}\n}\n\nfunc TestsplitDispatcher(t *testing.T) {\n\twriter1, _ := newBytesVerifier(t)\n\twriter2, _ := newBytesVerifier(t)\n\tspliter, err := newSplitDispatcher(onlyMessageFormatForTest, []interface{}{writer1, writer2})\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tcontext, 
err := currentContext()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tbytes := []byte(\"Hello\")\n\n\twriter1.ExpectBytes(bytes)\n\twriter2.ExpectBytes(bytes)\n\tspliter.Dispatch(string(bytes), TraceLvl, context, func(err error) {})\n\twriter1.MustNotExpect()\n\twriter2.MustNotExpect()\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/doc.go",
    "content": "// Copyright (c) 2014 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n/*\nPackage seelog implements logging functionality with flexible dispatching, filtering, and formatting.\n\nCreation\n\nTo create a logger, use one of the following constructors:\n  func LoggerFromConfigAsBytes\n  func LoggerFromConfigAsFile\n  func LoggerFromConfigAsString\n  func LoggerFromWriterWithMinLevel\n  func LoggerFromWriterWithMinLevelAndFormat\n  func LoggerFromCustomReceiver (check https://github.com/cihub/seelog/wiki/Custom-receivers)\nExample:\n  import log \"github.com/cihub/seelog\"\n\n  func main() {\n      logger, err := 
log.LoggerFromConfigAsFile(\"seelog.xml\")\n      if err != nil {\n          panic(err)\n      }\n      defer logger.Flush()\n      ... use logger ...\n  }\nThe \"defer\" line is important because if you are using asynchronous logger behavior, without this line you may end up losing some\nmessages when you close your application because they are processed in another non-blocking goroutine. To avoid that you\nexplicitly defer flushing all messages before closing.\n\nUsage\n\nLogger created using one of the LoggerFrom* funcs can be used directly by calling one of the main log funcs.\nExample:\n  import log \"github.com/cihub/seelog\"\n\n  func main() {\n      logger, err := log.LoggerFromConfigAsFile(\"seelog.xml\")\n      if err != nil {\n          panic(err)\n      }\n      defer logger.Flush()\n      logger.Trace(\"test\")\n      logger.Debugf(\"var = %s\", \"abc\")\n  }\n\nHaving loggers as variables is convenient if you are writing your own package with internal logging or if you have\nseveral loggers with different options.\nBut for most standalone apps it is more convenient to use package level funcs and vars. There is a package level\nvar 'Current' made for it. 
You can replace it with another logger using 'ReplaceLogger' and then use package level funcs:\n  import log \"github.com/cihub/seelog\"\n\n  func main() {\n      logger, err := log.LoggerFromConfigAsFile(\"seelog.xml\")\n      if err != nil {\n          panic(err)\n      }\n      log.ReplaceLogger(logger)\n      defer log.Flush()\n      log.Trace(\"test\")\n      log.Debugf(\"var = %s\", \"abc\")\n  }\nLast lines\n      log.Trace(\"test\")\n      log.Debugf(\"var = %s\", \"abc\")\ndo the same as\n      log.Current.Trace(\"test\")\n      log.Current.Debugf(\"var = %s\", \"abc\")\nIn this example the 'Current' logger was replaced using a 'ReplaceLogger' call and became equal to 'logger' variable created from config.\nThis way you are able to use package level funcs instead of passing the logger variable.\n\nConfiguration\n\nMain seelog point is to configure logger via config files and not the code. So you can only specify\nformats and log rules by changing the configuration.\nThe configuration is read by LoggerFrom* funcs. 
These funcs read xml configuration from different sources and try\nto create a logger using it.\n\nAll the configuration features are covered in detail in the official wiki: https://github.com/cihub/seelog/wiki.\nThere are many sections covering different aspects of seelog, but the most important for understanding configs are:\n    https://github.com/cihub/seelog/wiki/Constraints-and-exceptions\n    https://github.com/cihub/seelog/wiki/Dispatchers-and-receivers\n    https://github.com/cihub/seelog/wiki/Formatting\n    https://github.com/cihub/seelog/wiki/Logger-types\nAfter you understand these concepts, check the 'Reference' section on the main wiki page to get the up-to-date\nlist of dispatchers, receivers, formats, and logger types.\n\nHere is an example config with all these features:\n    <seelog type=\"adaptive\" mininterval=\"2000000\" maxinterval=\"100000000\" critmsgcount=\"500\" minlevel=\"debug\">\n        <exceptions>\n            <exception filepattern=\"test*\" minlevel=\"error\"/>\n        </exceptions>\n        <outputs formatid=\"all\">\n            <file path=\"all.log\"/>\n            <filter levels=\"info\">\n              <console formatid=\"fmtinfo\"/>\n            </filter>\n            <filter levels=\"error,critical\" formatid=\"fmterror\">\n              <console/>\n              <file path=\"errors.log\"/>\n            </filter>\n        </outputs>\n        <formats>\n            <format id=\"fmtinfo\" format=\"[%Level] [%Time] %Msg%n\"/>\n            <format id=\"fmterror\" format=\"[%LEVEL] [%Time] [%FuncShort @ %File.%Line] %Msg%n\"/>\n            <format id=\"all\" format=\"[%Level] [%Time] [@ %File.%Line] %Msg%n\"/>\n            <format id=\"criticalemail\" format=\"Critical error on our server!\\n    %Time %Date %RelFile %Func %Msg \\nSent by Seelog\"/>\n        </formats>\n    </seelog>\nThis config represents a logger with adaptive timeout between log messages (check logger types reference) which\nlogs to console, all.log, and 
errors.log depending on the log level. Its output formats also depend on log level. This logger will only\nuse log level 'debug' and higher (minlevel is set) for all files with names that don't start with 'test'. For files starting with 'test'\nthis logger prohibits all levels below 'error'.\n\nExamples\n\nTo learn seelog features faster you should check the examples package: https://github.com/cihub/seelog-examples\nIt contains many example configs and usecases.\n*/\npackage seelog\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/format.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n)\n\n// FormatterSymbol is a special symbol used in config files to mark special format aliases.\nconst (\n\tFormatterSymbol = '%'\n)\nconst (\n\tformatterSymbolString   = \"%\"\n\tformatterParameterStart = '('\n\tformatterParameterEnd   = ')'\n)\n\n// These are the time and date formats that are used when %Date or %Time format aliases are used.\nconst (\n\tDateDefaultFormat = \"2006-01-02\"\n\tTimeFormat        = \"15:04:05\"\n)\n\nvar DefaultMsgFormat 
= \"%Ns [%Level] %Msg%n\"\n\nvar defaultformatter *formatter\nvar msgonlyformatter *formatter\n\nfunc init() {\n\tvar err error\n\tdefaultformatter, err = newFormatter(DefaultMsgFormat)\n\tif err != nil {\n\t\tfmt.Println(\"Error during defaultformatter creation: \" + err.Error())\n\t}\n\tmsgonlyformatter, err = newFormatter(\"%Msg\")\n\tif err != nil {\n\t\tfmt.Println(\"Error during msgonlyformatter creation: \" + err.Error())\n\t}\n}\n\n// FormatterFunc represents one formatter object that starts with '%' sign in the 'format' attribute\n// of the 'format' config item. These special symbols are replaced with context values or special\n// strings when message is written to byte receiver.\n//\n// Check https://github.com/cihub/seelog/wiki/Formatting for details.\n// Full list (with descriptions) of formatters: https://github.com/cihub/seelog/wiki/Format-reference\n//\n// FormatterFunc takes raw log message, level, log context and returns a string, number (of any type) or any object\n// that can be evaluated as string.\ntype FormatterFunc func(message string, level LogLevel, context LogContextInterface) interface{}\n\n// FormatterFuncCreator is a factory of FormatterFunc objects. 
It is used to generate parameterized\n// formatters (such as %Date or %EscM) and custom user formatters.\ntype FormatterFuncCreator func(param string) FormatterFunc\n\nvar formatterFuncs = map[string]FormatterFunc{\n\t\"Level\":     formatterLevel,\n\t\"Lev\":       formatterLev,\n\t\"LEVEL\":     formatterLEVEL,\n\t\"LEV\":       formatterLEV,\n\t\"l\":         formatterl,\n\t\"Msg\":       formatterMsg,\n\t\"FullPath\":  formatterFullPath,\n\t\"File\":      formatterFile,\n\t\"RelFile\":   formatterRelFile,\n\t\"Func\":      FormatterFunction,\n\t\"FuncShort\": FormatterFunctionShort,\n\t\"Line\":      formatterLine,\n\t\"Time\":      formatterTime,\n\t\"UTCTime\":   formatterUTCTime,\n\t\"Ns\":        formatterNs,\n\t\"UTCNs\":     formatterUTCNs,\n\t\"n\":         formattern,\n\t\"t\":         formattert,\n}\n\nvar formatterFuncsParameterized = map[string]FormatterFuncCreator{\n\t\"Date\":    createDateTimeFormatterFunc,\n\t\"UTCDate\": createUTCDateTimeFormatterFunc,\n\t\"EscM\":    createANSIEscapeFunc,\n}\n\nfunc errorAliasReserved(name string) error {\n\treturn fmt.Errorf(\"cannot use '%s' as custom formatter name. Name is reserved\", name)\n}\n\n// RegisterCustomFormatter registers a new custom formatter factory with a given name. If returned error is nil,\n// then this name (prepended by '%' symbol) can be used in 'format' attributes in configuration and\n// it will be treated like the standard parameterized formatter identifiers.\n//\n// RegisterCustomFormatter needs to be called before creating a logger for it to take effect. 
The general recommendation\n// is to call it once in 'init' func of your application or any initializer func.\n//\n// For usage examples, check https://github.com/cihub/seelog/wiki/Custom-formatters.\n//\n// Name must only consist of letters (unicode.IsLetter).\n//\n// Name must not be one of the already registered standard formatter names\n// (https://github.com/cihub/seelog/wiki/Format-reference) and previously registered\n// custom format names. To avoid any potential name conflicts (in future releases), it is recommended\n// to start your custom formatter name with a namespace (e.g. 'MyCompanySomething') or a 'Custom' keyword.\nfunc RegisterCustomFormatter(name string, creator FormatterFuncCreator) error {\n\tif _, ok := formatterFuncs[name]; ok {\n\t\treturn errorAliasReserved(name)\n\t}\n\tif _, ok := formatterFuncsParameterized[name]; ok {\n\t\treturn errorAliasReserved(name)\n\t}\n\tformatterFuncsParameterized[name] = creator\n\treturn nil\n}\n\n// formatter is used to write messages in a specific format, inserting such additional data\n// as log level, date/time, etc.\ntype formatter struct {\n\tfmtStringOriginal string\n\tfmtString         string\n\tformatterFuncs    []FormatterFunc\n}\n\n// newFormatter creates a new formatter using a format string\nfunc newFormatter(formatString string) (*formatter, error) {\n\tnewformatter := new(formatter)\n\tnewformatter.fmtStringOriginal = formatString\n\n\terr := newformatter.buildFormatterFuncs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newformatter, nil\n}\n\nfunc (formatter *formatter) buildFormatterFuncs() error {\n\tformatter.formatterFuncs = make([]FormatterFunc, 0)\n\tvar fmtString string\n\tfor i := 0; i < len(formatter.fmtStringOriginal); i++ {\n\t\tchar := formatter.fmtStringOriginal[i]\n\t\tif char != FormatterSymbol {\n\t\t\tfmtString += string(char)\n\t\t\tcontinue\n\t\t}\n\n\t\tisEndOfStr := i == len(formatter.fmtStringOriginal)-1\n\t\tif isEndOfStr {\n\t\t\treturn fmt.Errorf(\"format 
error: %v - last symbol\", formatterSymbolString)\n\t\t}\n\n\t\tisDoubledFormatterSymbol := formatter.fmtStringOriginal[i+1] == FormatterSymbol\n\t\tif isDoubledFormatterSymbol {\n\t\t\tfmtString += formatterSymbolString\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\n\t\tfunction, nextI, err := formatter.extractFormatterFunc(i + 1)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmtString += \"%v\"\n\t\ti = nextI\n\t\tformatter.formatterFuncs = append(formatter.formatterFuncs, function)\n\t}\n\n\tformatter.fmtString = fmtString\n\treturn nil\n}\n\nfunc (formatter *formatter) extractFormatterFunc(index int) (FormatterFunc, int, error) {\n\tletterSequence := formatter.extractLetterSequence(index)\n\tif len(letterSequence) == 0 {\n\t\treturn nil, 0, fmt.Errorf(\"format error: lack of formatter after %v. At %v\", formatterSymbolString, index)\n\t}\n\n\tfunction, formatterLength, ok := formatter.findFormatterFunc(letterSequence)\n\tif ok {\n\t\treturn function, index + formatterLength - 1, nil\n\t}\n\n\tfunction, formatterLength, ok, err := formatter.findFormatterFuncParametrized(letterSequence, index)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\tif ok {\n\t\treturn function, index + formatterLength - 1, nil\n\t}\n\n\treturn nil, 0, errors.New(\"format error: unrecognized formatter at \" + strconv.Itoa(index) + \": \" + letterSequence)\n}\n\nfunc (formatter *formatter) extractLetterSequence(index int) string {\n\tletters := \"\"\n\n\tbytesToParse := []byte(formatter.fmtStringOriginal[index:])\n\truneCount := utf8.RuneCount(bytesToParse)\n\tfor i := 0; i < runeCount; i++ {\n\t\trune, runeSize := utf8.DecodeRune(bytesToParse)\n\t\tbytesToParse = bytesToParse[runeSize:]\n\n\t\tif unicode.IsLetter(rune) {\n\t\t\tletters += string(rune)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn letters\n}\n\nfunc (formatter *formatter) findFormatterFunc(letters string) (FormatterFunc, int, bool) {\n\tcurrentVerb := letters\n\tfor i := 0; i < len(letters); i++ {\n\t\tfunction, ok := 
formatterFuncs[currentVerb]\n\t\tif ok {\n\t\t\treturn function, len(currentVerb), ok\n\t\t}\n\t\tcurrentVerb = currentVerb[:len(currentVerb)-1]\n\t}\n\n\treturn nil, 0, false\n}\n\nfunc (formatter *formatter) findFormatterFuncParametrized(letters string, lettersStartIndex int) (FormatterFunc, int, bool, error) {\n\tcurrentVerb := letters\n\tfor i := 0; i < len(letters); i++ {\n\t\tfunctionCreator, ok := formatterFuncsParameterized[currentVerb]\n\t\tif ok {\n\t\t\tparameter := \"\"\n\t\t\tparameterLen := 0\n\t\t\tisVerbEqualsLetters := i == 0 // if not, then letter goes after formatter, and formatter is parameterless\n\t\t\tif isVerbEqualsLetters {\n\t\t\t\tuserParameter := \"\"\n\t\t\t\tvar err error\n\t\t\t\tuserParameter, parameterLen, ok, err = formatter.findparameter(lettersStartIndex + len(currentVerb))\n\t\t\t\tif ok {\n\t\t\t\t\tparameter = userParameter\n\t\t\t\t} else if err != nil {\n\t\t\t\t\treturn nil, 0, false, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn functionCreator(parameter), len(currentVerb) + parameterLen, true, nil\n\t\t}\n\n\t\tcurrentVerb = currentVerb[:len(currentVerb)-1]\n\t}\n\n\treturn nil, 0, false, nil\n}\n\nfunc (formatter *formatter) findparameter(startIndex int) (string, int, bool, error) {\n\tif len(formatter.fmtStringOriginal) == startIndex || formatter.fmtStringOriginal[startIndex] != formatterParameterStart {\n\t\treturn \"\", 0, false, nil\n\t}\n\n\tendIndex := strings.Index(formatter.fmtStringOriginal[startIndex:], string(formatterParameterEnd))\n\tif endIndex == -1 {\n\t\treturn \"\", 0, false, fmt.Errorf(\"Unmatched parenthesis or invalid parameter at %d: %s\",\n\t\t\tstartIndex, formatter.fmtStringOriginal[startIndex:])\n\t}\n\tendIndex += startIndex\n\n\tlength := endIndex - startIndex + 1\n\n\treturn formatter.fmtStringOriginal[startIndex+1 : endIndex], length, true, nil\n}\n\n// Format processes a message with special formatters, log level, and context. 
Returns formatted string\n// with all formatter identifiers changed to appropriate values.\nfunc (formatter *formatter) Format(message string, level LogLevel, context LogContextInterface) string {\n\tif len(formatter.formatterFuncs) == 0 {\n\t\treturn formatter.fmtString\n\t}\n\n\tparams := make([]interface{}, len(formatter.formatterFuncs))\n\tfor i, function := range formatter.formatterFuncs {\n\t\tparams[i] = function(message, level, context)\n\t}\n\n\treturn fmt.Sprintf(formatter.fmtString, params...)\n}\n\nfunc (formatter *formatter) String() string {\n\treturn formatter.fmtStringOriginal\n}\n\n//=====================================================\n\nconst (\n\twrongLogLevel   = \"WRONG_LOGLEVEL\"\n\twrongEscapeCode = \"WRONG_ESCAPE\"\n)\n\nvar levelToString = map[LogLevel]string{\n\tTraceLvl:    \"Trace\",\n\tDebugLvl:    \"Debug\",\n\tInfoLvl:     \"Info\",\n\tWarnLvl:     \"Warn\",\n\tErrorLvl:    \"Error\",\n\tCriticalLvl: \"Critical\",\n\tOff:         \"Off\",\n}\n\nvar levelToShortString = map[LogLevel]string{\n\tTraceLvl:    \"Trc\",\n\tDebugLvl:    \"Dbg\",\n\tInfoLvl:     \"Inf\",\n\tWarnLvl:     \"Wrn\",\n\tErrorLvl:    \"Err\",\n\tCriticalLvl: \"Crt\",\n\tOff:         \"Off\",\n}\n\nvar levelToShortestString = map[LogLevel]string{\n\tTraceLvl:    \"t\",\n\tDebugLvl:    \"d\",\n\tInfoLvl:     \"i\",\n\tWarnLvl:     \"w\",\n\tErrorLvl:    \"e\",\n\tCriticalLvl: \"c\",\n\tOff:         \"o\",\n}\n\nfunc formatterLevel(message string, level LogLevel, context LogContextInterface) interface{} {\n\tlevelStr, ok := levelToString[level]\n\tif !ok {\n\t\treturn wrongLogLevel\n\t}\n\treturn levelStr\n}\n\nfunc formatterLev(message string, level LogLevel, context LogContextInterface) interface{} {\n\tlevelStr, ok := levelToShortString[level]\n\tif !ok {\n\t\treturn wrongLogLevel\n\t}\n\treturn levelStr\n}\n\nfunc formatterLEVEL(message string, level LogLevel, context LogContextInterface) interface{} {\n\treturn strings.ToTitle(formatterLevel(message, level, 
context).(string))\n}\n\nfunc formatterLEV(message string, level LogLevel, context LogContextInterface) interface{} {\n\treturn strings.ToTitle(formatterLev(message, level, context).(string))\n}\n\nfunc formatterl(message string, level LogLevel, context LogContextInterface) interface{} {\n\tlevelStr, ok := levelToShortestString[level]\n\tif !ok {\n\t\treturn wrongLogLevel\n\t}\n\treturn levelStr\n}\n\nfunc formatterMsg(message string, level LogLevel, context LogContextInterface) interface{} {\n\treturn message\n}\n\nfunc formatterFullPath(message string, level LogLevel, context LogContextInterface) interface{} {\n\treturn context.FullPath()\n}\n\nfunc formatterFile(message string, level LogLevel, context LogContextInterface) interface{} {\n\treturn context.FileName()\n}\n\nfunc formatterRelFile(message string, level LogLevel, context LogContextInterface) interface{} {\n\treturn context.ShortPath()\n}\n\nfunc FormatterFunction(message string, level LogLevel, context LogContextInterface) interface{} {\n\treturn context.Func()\n}\n\nfunc FormatterFunctionShort(message string, level LogLevel, context LogContextInterface) interface{} {\n\tf := context.Func()\n\tspl := strings.Split(f, \".\")\n\treturn spl[len(spl)-1]\n}\n\nfunc formatterLine(message string, level LogLevel, context LogContextInterface) interface{} {\n\treturn context.Line()\n}\n\nfunc formatterTime(message string, level LogLevel, context LogContextInterface) interface{} {\n\treturn context.CallTime().Format(TimeFormat)\n}\n\nfunc formatterUTCTime(message string, level LogLevel, context LogContextInterface) interface{} {\n\treturn context.CallTime().UTC().Format(TimeFormat)\n}\n\nfunc formatterNs(message string, level LogLevel, context LogContextInterface) interface{} {\n\treturn context.CallTime().UnixNano()\n}\n\nfunc formatterUTCNs(message string, level LogLevel, context LogContextInterface) interface{} {\n\treturn context.CallTime().UTC().UnixNano()\n}\n\nfunc formattern(message string, level 
LogLevel, context LogContextInterface) interface{} {\n\treturn \"\\n\"\n}\n\nfunc formattert(message string, level LogLevel, context LogContextInterface) interface{} {\n\treturn \"\\t\"\n}\n\nfunc createDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc {\n\tformat := dateTimeFormat\n\tif format == \"\" {\n\t\tformat = DateDefaultFormat\n\t}\n\treturn func(message string, level LogLevel, context LogContextInterface) interface{} {\n\t\treturn context.CallTime().Format(format)\n\t}\n}\n\nfunc createUTCDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc {\n\tformat := dateTimeFormat\n\tif format == \"\" {\n\t\tformat = DateDefaultFormat\n\t}\n\treturn func(message string, level LogLevel, context LogContextInterface) interface{} {\n\t\treturn context.CallTime().UTC().Format(format)\n\t}\n}\n\nfunc createANSIEscapeFunc(escapeCodeString string) FormatterFunc {\n\treturn func(message string, level LogLevel, context LogContextInterface) interface{} {\n\t\tif len(escapeCodeString) == 0 {\n\t\t\treturn wrongEscapeCode\n\t\t}\n\n\t\treturn fmt.Sprintf(\"%c[%sm\", 0x1B, escapeCodeString)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/format_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\tTestFuncName = \"TestFormats\"\n)\n\ntype formatTest struct {\n\tformatString   string\n\tinput          string\n\tinputLogLevel  LogLevel\n\texpectedOutput string\n\terrorExpected  bool\n}\n\nvar formatTests = []formatTest{\n\t{\"test\", \"abcdef\", TraceLvl, \"test\", false},\n\t{\"\", \"abcdef\", TraceLvl, \"\", false},\n\t{\"%Level\", \"\", TraceLvl, \"Trace\", false},\n\t{\"%Level\", \"\", DebugLvl, \"Debug\", false},\n\t{\"%Level\", \"\", InfoLvl, \"Info\", 
false},\n\t{\"%Level\", \"\", WarnLvl, \"Warn\", false},\n\t{\"%Level\", \"\", ErrorLvl, \"Error\", false},\n\t{\"%Level\", \"\", CriticalLvl, \"Critical\", false},\n\t{\"[%Level]\", \"\", TraceLvl, \"[Trace]\", false},\n\t{\"[%Level]\", \"abc\", DebugLvl, \"[Debug]\", false},\n\t{\"%LevelLevel\", \"\", InfoLvl, \"InfoLevel\", false},\n\t{\"[%Level][%Level]\", \"\", WarnLvl, \"[Warn][Warn]\", false},\n\t{\"[%Level]X[%Level]\", \"\", ErrorLvl, \"[Error]X[Error]\", false},\n\t{\"%Levelll\", \"\", CriticalLvl, \"Criticalll\", false},\n\t{\"%Lvl\", \"\", TraceLvl, \"\", true},\n\t{\"%%Level\", \"\", DebugLvl, \"%Level\", false},\n\t{\"%Level%\", \"\", InfoLvl, \"\", true},\n\t{\"%sevel\", \"\", WarnLvl, \"\", true},\n\t{\"Level\", \"\", ErrorLvl, \"Level\", false},\n\t{\"%LevelLevel\", \"\", CriticalLvl, \"CriticalLevel\", false},\n\t{\"%Lev\", \"\", TraceLvl, \"Trc\", false},\n\t{\"%Lev\", \"\", DebugLvl, \"Dbg\", false},\n\t{\"%Lev\", \"\", InfoLvl, \"Inf\", false},\n\t{\"%Lev\", \"\", WarnLvl, \"Wrn\", false},\n\t{\"%Lev\", \"\", ErrorLvl, \"Err\", false},\n\t{\"%Lev\", \"\", CriticalLvl, \"Crt\", false},\n\t{\"[%Lev]\", \"\", TraceLvl, \"[Trc]\", false},\n\t{\"[%Lev]\", \"abc\", DebugLvl, \"[Dbg]\", false},\n\t{\"%LevLevel\", \"\", InfoLvl, \"InfLevel\", false},\n\t{\"[%Level][%Lev]\", \"\", WarnLvl, \"[Warn][Wrn]\", false},\n\t{\"[%Lev]X[%Lev]\", \"\", ErrorLvl, \"[Err]X[Err]\", false},\n\t{\"%Levll\", \"\", CriticalLvl, \"Crtll\", false},\n\t{\"%LEVEL\", \"\", TraceLvl, \"TRACE\", false},\n\t{\"%LEVEL\", \"\", DebugLvl, \"DEBUG\", false},\n\t{\"%LEVEL\", \"\", InfoLvl, \"INFO\", false},\n\t{\"%LEVEL\", \"\", WarnLvl, \"WARN\", false},\n\t{\"%LEVEL\", \"\", ErrorLvl, \"ERROR\", false},\n\t{\"%LEVEL\", \"\", CriticalLvl, \"CRITICAL\", false},\n\t{\"[%LEVEL]\", \"\", TraceLvl, \"[TRACE]\", false},\n\t{\"[%LEVEL]\", \"abc\", DebugLvl, \"[DEBUG]\", false},\n\t{\"%LEVELLEVEL\", \"\", InfoLvl, \"INFOLEVEL\", false},\n\t{\"[%LEVEL][%LEVEL]\", \"\", WarnLvl, 
\"[WARN][WARN]\", false},\n\t{\"[%LEVEL]X[%Level]\", \"\", ErrorLvl, \"[ERROR]X[Error]\", false},\n\t{\"%LEVELLL\", \"\", CriticalLvl, \"CRITICALLL\", false},\n\t{\"%LEV\", \"\", TraceLvl, \"TRC\", false},\n\t{\"%LEV\", \"\", DebugLvl, \"DBG\", false},\n\t{\"%LEV\", \"\", InfoLvl, \"INF\", false},\n\t{\"%LEV\", \"\", WarnLvl, \"WRN\", false},\n\t{\"%LEV\", \"\", ErrorLvl, \"ERR\", false},\n\t{\"%LEV\", \"\", CriticalLvl, \"CRT\", false},\n\t{\"[%LEV]\", \"\", TraceLvl, \"[TRC]\", false},\n\t{\"[%LEV]\", \"abc\", DebugLvl, \"[DBG]\", false},\n\t{\"%LEVLEVEL\", \"\", InfoLvl, \"INFLEVEL\", false},\n\t{\"[%LEVEL][%LEV]\", \"\", WarnLvl, \"[WARN][WRN]\", false},\n\t{\"[%LEV]X[%LEV]\", \"\", ErrorLvl, \"[ERR]X[ERR]\", false},\n\t{\"%LEVLL\", \"\", CriticalLvl, \"CRTLL\", false},\n\t{\"%l\", \"\", TraceLvl, \"t\", false},\n\t{\"%l\", \"\", DebugLvl, \"d\", false},\n\t{\"%l\", \"\", InfoLvl, \"i\", false},\n\t{\"%l\", \"\", WarnLvl, \"w\", false},\n\t{\"%l\", \"\", ErrorLvl, \"e\", false},\n\t{\"%l\", \"\", CriticalLvl, \"c\", false},\n\t{\"[%l]\", \"\", TraceLvl, \"[t]\", false},\n\t{\"[%l]\", \"abc\", DebugLvl, \"[d]\", false},\n\t{\"%Level%Msg\", \"\", TraceLvl, \"Trace\", false},\n\t{\"%Level%Msg\", \"A\", DebugLvl, \"DebugA\", false},\n\t{\"%Level%Msg\", \"\", InfoLvl, \"Info\", false},\n\t{\"%Level%Msg\", \"test\", WarnLvl, \"Warntest\", false},\n\t{\"%Level%Msg\", \" \", ErrorLvl, \"Error \", false},\n\t{\"%Level%Msg\", \"\", CriticalLvl, \"Critical\", false},\n\t{\"[%Level]\", \"\", TraceLvl, \"[Trace]\", false},\n\t{\"[%Level]\", \"abc\", DebugLvl, \"[Debug]\", false},\n\t{\"%Level%MsgLevel\", \"A\", InfoLvl, \"InfoALevel\", false},\n\t{\"[%Level]%Msg[%Level]\", \"test\", WarnLvl, \"[Warn]test[Warn]\", false},\n\t{\"[%Level]%MsgX[%Level]\", \"test\", ErrorLvl, \"[Error]testX[Error]\", false},\n\t{\"%Levell%Msgl\", \"Test\", CriticalLvl, \"CriticallTestl\", false},\n\t{\"%Lev%Msg%LEVEL%LEV%l%Msg\", \"Test\", InfoLvl, \"InfTestINFOINFiTest\", false},\n\t{\"%n\", 
\"\", CriticalLvl, \"\\n\", false},\n\t{\"%t\", \"\", CriticalLvl, \"\\t\", false},\n}\n\nfunc TestFormats(t *testing.T) {\n\n\tcontext, conErr := currentContext()\n\tif conErr != nil {\n\t\tt.Fatal(\"Cannot get current context:\" + conErr.Error())\n\t\treturn\n\t}\n\n\tfor _, test := range formatTests {\n\n\t\tform, err := newFormatter(test.formatString)\n\n\t\tif (err != nil) != test.errorExpected {\n\t\t\tt.Errorf(\"input: %s \\nInput LL: %s\\n* Expected error:%t Got error: %t\\n\",\n\t\t\t\ttest.input, test.inputLogLevel, test.errorExpected, (err != nil))\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"%s\\n\", err.Error())\n\t\t\t}\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tmsg := form.Format(test.input, test.inputLogLevel, context)\n\n\t\tif err == nil && msg != test.expectedOutput {\n\t\t\tt.Errorf(\"format: %s \\nInput: %s \\nInput LL: %s\\n* Expected: %s \\n* Got: %s\\n\",\n\t\t\t\ttest.formatString, test.input, test.inputLogLevel, test.expectedOutput, msg)\n\t\t}\n\t}\n}\n\nfunc TestDateFormat(t *testing.T) {\n\t_, err := newFormatter(\"%Date\")\n\tif err != nil {\n\t\tt.Error(\"Unexpected error: \" + err.Error())\n\t}\n}\n\nfunc TestDateParameterizedFormat(t *testing.T) {\n\ttestFormat := \"Mon Jan 02 2006 15:04:05\"\n\tpreciseForamt := \"Mon Jan 02 2006 15:04:05.000\"\n\n\tcontext, conErr := currentContext()\n\tif conErr != nil {\n\t\tt.Fatal(\"Cannot get current context:\" + conErr.Error())\n\t\treturn\n\t}\n\n\tform, err := newFormatter(\"%Date(\" + preciseForamt + \")\")\n\tif err != nil {\n\t\tt.Error(\"Unexpected error: \" + err.Error())\n\t}\n\n\tdateBefore := time.Now().Format(testFormat)\n\tmsg := form.Format(\"\", TraceLvl, context)\n\tdateAfter := time.Now().Format(testFormat)\n\n\tif !strings.HasPrefix(msg, dateBefore) && !strings.HasPrefix(msg, dateAfter) {\n\t\tt.Errorf(\"incorrect message: %v. 
Expected %v or %v\", msg, dateBefore, dateAfter)\n\t}\n\n\t_, err = newFormatter(\"%Date(\" + preciseForamt)\n\tif err == nil {\n\t\tt.Error(\"Expected error for invalid format\")\n\t}\n}\n\nfunc createTestFormatter(format string) FormatterFunc {\n\treturn func(message string, level LogLevel, context LogContextInterface) interface{} {\n\t\treturn \"TEST \" + context.Func() + \" TEST\"\n\t}\n}\n\nfunc TestCustomFormatterRegistration(t *testing.T) {\n\terr := RegisterCustomFormatter(\"Level\", createTestFormatter)\n\tif err == nil {\n\t\tt.Errorf(\"expected an error when trying to register a custom formatter with a reserved alias\")\n\t}\n\terr = RegisterCustomFormatter(\"EscM\", createTestFormatter)\n\tif err == nil {\n\t\tt.Errorf(\"expected an error when trying to register a custom formatter with a reserved parameterized alias\")\n\t}\n\terr = RegisterCustomFormatter(\"TEST\", createTestFormatter)\n\tif err != nil {\n\t\tt.Fatalf(\"Registering custom formatter: unexpected error: %s\", err)\n\t}\n\terr = RegisterCustomFormatter(\"TEST\", createTestFormatter)\n\tif err == nil {\n\t\tt.Errorf(\"expected an error when trying to register a custom formatter with duplicate name\")\n\t}\n\n\tcontext, conErr := currentContext()\n\tif conErr != nil {\n\t\tt.Fatal(\"Cannot get current context:\" + conErr.Error())\n\t\treturn\n\t}\n\n\tform, err := newFormatter(\"%Msg %TEST 123\")\n\tif err != nil {\n\t\tt.Fatalf(\"%s\\n\", err.Error())\n\t}\n\n\texpected := fmt.Sprintf(\"test TEST %sTestCustomFormatterRegistration TEST 123\", commonPrefix)\n\tmsg := form.Format(\"test\", DebugLvl, context)\n\tif msg != expected {\n\t\tt.Fatalf(\"Custom formatter: invalid output. Expected: '%s'. Got: '%s'\", expected, msg)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/internals_baseerror.go",
    "content": "package seelog\n\n// Base struct for custom errors.\ntype baseError struct {\n\tmessage string\n}\n\nfunc (be baseError) Error() string {\n\treturn be.message\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/internals_byteverifiers_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n\t\"testing\"\n)\n\n// bytesVerifier is a byte receiver which is used for correct input testing.\n// It allows to compare expected result and actual result in context of received bytes.\ntype bytesVerifier struct {\n\texpectedBytes   []byte // bytes that are expected to be written in next Write call\n\twaitingForInput bool   // true if verifier is waiting for a Write call\n\twrittenData     []byte // real bytes that actually were received during the last Write call\n\ttestEnv         *testing.T\n}\n\nfunc 
newBytesVerifier(t *testing.T) (*bytesVerifier, error) {\n\tif t == nil {\n\t\treturn nil, errors.New(\"testing environment param is nil\")\n\t}\n\n\tverifier := new(bytesVerifier)\n\tverifier.testEnv = t\n\n\treturn verifier, nil\n}\n\n// Write is used to check whether verifier was waiting for input and whether bytes are the same as expectedBytes.\n// After Write call, waitingForInput is set to false.\nfunc (verifier *bytesVerifier) Write(bytes []byte) (n int, err error) {\n\tif !verifier.waitingForInput {\n\t\tverifier.testEnv.Errorf(\"unexpected input: %v\", string(bytes))\n\t\treturn\n\t}\n\n\tverifier.waitingForInput = false\n\tverifier.writtenData = bytes\n\n\tif verifier.expectedBytes != nil {\n\t\tif bytes == nil {\n\t\t\tverifier.testEnv.Errorf(\"incoming 'bytes' is nil\")\n\t\t} else {\n\t\t\tif len(bytes) != len(verifier.expectedBytes) {\n\t\t\t\tverifier.testEnv.Errorf(\"'Bytes' has unexpected len. Expected: %d. Got: %d. . Expected string: %q. Got: %q\",\n\t\t\t\t\tlen(verifier.expectedBytes), len(bytes), string(verifier.expectedBytes), string(bytes))\n\t\t\t} else {\n\t\t\t\tfor i := 0; i < len(bytes); i++ {\n\t\t\t\t\tif verifier.expectedBytes[i] != bytes[i] {\n\t\t\t\t\t\tverifier.testEnv.Errorf(\"incorrect data on position %d. Expected: %d. Got: %d. Expected string: %q. Got: %q\",\n\t\t\t\t\t\t\ti, verifier.expectedBytes[i], bytes[i], string(verifier.expectedBytes), string(bytes))\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn len(bytes), nil\n}\n\nfunc (verifier *bytesVerifier) ExpectBytes(bytes []byte) {\n\tverifier.waitingForInput = true\n\tverifier.expectedBytes = bytes\n}\n\nfunc (verifier *bytesVerifier) MustNotExpect() {\n\tif verifier.waitingForInput {\n\t\terrorText := \"Unexpected input: \"\n\n\t\tif verifier.expectedBytes != nil {\n\t\t\terrorText += \"len = \" + strconv.Itoa(len(verifier.expectedBytes))\n\t\t\terrorText += \". 
text = \" + string(verifier.expectedBytes)\n\t\t}\n\n\t\tverifier.testEnv.Errorf(errorText)\n\t}\n}\n\nfunc (verifier *bytesVerifier) Close() error {\n\treturn nil\n}\n\n// nullWriter implements io.Writer inteface and does nothing, always returning a successful write result\ntype nullWriter struct {\n}\n\nfunc (writer *nullWriter) Write(bytes []byte) (n int, err error) {\n\treturn len(bytes), nil\n}\n\nfunc (writer *nullWriter) Close() error {\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/internals_fsutils.go",
    "content": "package seelog\n\nimport (\n\t\"archive/zip\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sync\"\n)\n\n// File and directory permitions.\nconst (\n\tdefaultFilePermissions      = 0666\n\tdefaultDirectoryPermissions = 0767\n)\n\nconst (\n\t// Max number of directories can be read asynchronously.\n\tmaxDirNumberReadAsync = 1000\n)\n\ntype cannotOpenFileError struct {\n\tbaseError\n}\n\nfunc newCannotOpenFileError(fname string) *cannotOpenFileError {\n\treturn &cannotOpenFileError{baseError{message: \"Cannot open file: \" + fname}}\n}\n\ntype notDirectoryError struct {\n\tbaseError\n}\n\nfunc newNotDirectoryError(dname string) *notDirectoryError {\n\treturn &notDirectoryError{baseError{message: dname + \" is not directory\"}}\n}\n\n// fileFilter is a filtering criteria function for '*os.File'.\n// Must return 'false' to set aside the given file.\ntype fileFilter func(os.FileInfo, *os.File) bool\n\n// filePathFilter is a filtering creteria function for file path.\n// Must return 'false' to set aside the given file.\ntype filePathFilter func(filePath string) bool\n\n// GetSubdirNames returns a list of directories found in\n// the given one with dirPath.\nfunc getSubdirNames(dirPath string) ([]string, error) {\n\tfi, err := os.Stat(dirPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !fi.IsDir() {\n\t\treturn nil, newNotDirectoryError(dirPath)\n\t}\n\tdd, err := os.Open(dirPath)\n\t// Cannot open file.\n\tif err != nil {\n\t\tif dd != nil {\n\t\t\tdd.Close()\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer dd.Close()\n\t// TODO: Improve performance by buffering reading.\n\tallEntities, err := dd.Readdir(-1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsubDirs := []string{}\n\tfor _, entity := range allEntities {\n\t\tif entity.IsDir() {\n\t\t\tsubDirs = append(subDirs, entity.Name())\n\t\t}\n\t}\n\treturn subDirs, nil\n}\n\n// getSubdirAbsPaths recursively visit all the subdirectories\n// starting from the 
given directory and returns absolute paths for them.\nfunc getAllSubdirAbsPaths(dirPath string) (res []string, err error) {\n\tdps, err := getSubdirAbsPaths(dirPath)\n\tif err != nil {\n\t\tres = []string{}\n\t\treturn\n\t}\n\tres = append(res, dps...)\n\tfor _, dp := range dps {\n\t\tsdps, err := getAllSubdirAbsPaths(dp)\n\t\tif err != nil {\n\t\t\treturn []string{}, err\n\t\t}\n\t\tres = append(res, sdps...)\n\t}\n\treturn\n}\n\n// getSubdirAbsPaths supplies absolute paths for all subdirectiries in a given directory.\n// Input: (I1) dirPath - absolute path of a directory in question.\n// Out: (O1) - slice of subdir asbolute paths; (O2) - error of the operation.\n// Remark: If error (O2) is non-nil then (O1) is nil and vice versa.\nfunc getSubdirAbsPaths(dirPath string) ([]string, error) {\n\tsdns, err := getSubdirNames(dirPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trsdns := []string{}\n\tfor _, sdn := range sdns {\n\t\trsdns = append(rsdns, filepath.Join(dirPath, sdn))\n\t}\n\treturn rsdns, nil\n}\n\n// getOpenFilesInDir supplies a slice of os.File pointers to files located in the directory.\n// Remark: Ignores files for which fileFilter returns false\nfunc getOpenFilesInDir(dirPath string, fFilter fileFilter) ([]*os.File, error) {\n\tdfi, err := os.Open(dirPath)\n\tif err != nil {\n\t\treturn nil, newCannotOpenFileError(\"Cannot open directory \" + dirPath)\n\t}\n\tdefer dfi.Close()\n\t// Size of read buffer (i.e. 
chunk of items read at a time).\n\trbs := 64\n\tresFiles := []*os.File{}\nL:\n\tfor {\n\t\t// Read directory entities by reasonable chuncks\n\t\t// to prevent overflows on big number of files.\n\t\tfis, e := dfi.Readdir(rbs)\n\t\tswitch e {\n\t\t// It's OK.\n\t\tcase nil:\n\t\t// Do nothing, just continue cycle.\n\t\tcase io.EOF:\n\t\t\tbreak L\n\t\t// Something went wrong.\n\t\tdefault:\n\t\t\treturn nil, e\n\t\t}\n\t\t// THINK: Maybe, use async running.\n\t\tfor _, fi := range fis {\n\t\t\t// NB: On Linux this could be a problem as\n\t\t\t// there are lots of file types available.\n\t\t\tif !fi.IsDir() {\n\t\t\t\tf, e := os.Open(filepath.Join(dirPath, fi.Name()))\n\t\t\t\tif e != nil {\n\t\t\t\t\tif f != nil {\n\t\t\t\t\t\tf.Close()\n\t\t\t\t\t}\n\t\t\t\t\t// THINK: Add nil as indicator that a problem occurred.\n\t\t\t\t\tresFiles = append(resFiles, nil)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// Check filter condition.\n\t\t\t\tif fFilter != nil && !fFilter(fi, f) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tresFiles = append(resFiles, f)\n\t\t\t}\n\t\t}\n\t}\n\treturn resFiles, nil\n}\n\nfunc isRegular(m os.FileMode) bool {\n\treturn m&os.ModeType == 0\n}\n\n// getDirFilePaths return full paths of the files located in the directory.\n// Remark: Ignores files for which fileFilter returns false.\nfunc getDirFilePaths(dirPath string, fpFilter filePathFilter, pathIsName bool) ([]string, error) {\n\tdfi, err := os.Open(dirPath)\n\tif err != nil {\n\t\treturn nil, newCannotOpenFileError(\"Cannot open directory \" + dirPath)\n\t}\n\tdefer dfi.Close()\n\n\tvar absDirPath string\n\tif !filepath.IsAbs(dirPath) {\n\t\tabsDirPath, err = filepath.Abs(dirPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot get absolute path of directory: %s\", err.Error())\n\t\t}\n\t} else {\n\t\tabsDirPath = dirPath\n\t}\n\n\t// TODO: check if dirPath is really directory.\n\t// Size of read buffer (i.e. 
chunk of items read at a time).\n\trbs := 2 << 5\n\tfilePaths := []string{}\n\n\tvar fp string\nL:\n\tfor {\n\t\t// Read directory entities by reasonable chuncks\n\t\t// to prevent overflows on big number of files.\n\t\tfis, e := dfi.Readdir(rbs)\n\t\tswitch e {\n\t\t// It's OK.\n\t\tcase nil:\n\t\t// Do nothing, just continue cycle.\n\t\tcase io.EOF:\n\t\t\tbreak L\n\t\t// Indicate that something went wrong.\n\t\tdefault:\n\t\t\treturn nil, e\n\t\t}\n\t\t// THINK: Maybe, use async running.\n\t\tfor _, fi := range fis {\n\t\t\t// NB: Should work on every Windows and non-Windows OS.\n\t\t\tif isRegular(fi.Mode()) {\n\t\t\t\tif pathIsName {\n\t\t\t\t\tfp = fi.Name()\n\t\t\t\t} else {\n\t\t\t\t\t// Build full path of a file.\n\t\t\t\t\tfp = filepath.Join(absDirPath, fi.Name())\n\t\t\t\t}\n\t\t\t\t// Check filter condition.\n\t\t\t\tif fpFilter != nil && !fpFilter(fp) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfilePaths = append(filePaths, fp)\n\t\t\t}\n\t\t}\n\t}\n\treturn filePaths, nil\n}\n\n// getOpenFilesByDirectoryAsync runs async reading directories 'dirPaths' and inserts pairs\n// in map 'filesInDirMap': Key - directory name, value - *os.File slice.\nfunc getOpenFilesByDirectoryAsync(\n\tdirPaths []string,\n\tfFilter fileFilter,\n\tfilesInDirMap map[string][]*os.File,\n) error {\n\tn := len(dirPaths)\n\tif n > maxDirNumberReadAsync {\n\t\treturn fmt.Errorf(\"number of input directories to be read exceeded max value %d\", maxDirNumberReadAsync)\n\t}\n\ttype filesInDirResult struct {\n\t\tDirName string\n\t\tFiles   []*os.File\n\t\tError   error\n\t}\n\tdirFilesChan := make(chan *filesInDirResult, n)\n\tvar wg sync.WaitGroup\n\t// Register n goroutines which are going to do work.\n\twg.Add(n)\n\tfor i := 0; i < n; i++ {\n\t\t// Launch asynchronously the piece of work.\n\t\tgo func(dirPath string) {\n\t\t\tfs, e := getOpenFilesInDir(dirPath, fFilter)\n\t\t\tdirFilesChan <- &filesInDirResult{filepath.Base(dirPath), fs, e}\n\t\t\t// Mark the current goroutine as 
finished (work is done).\n\t\t\twg.Done()\n\t\t}(dirPaths[i])\n\t}\n\t// Wait for all goroutines to finish their work.\n\twg.Wait()\n\t// Close the error channel to let for-range clause\n\t// get all the buffered values without blocking and quit in the end.\n\tclose(dirFilesChan)\n\tfor fidr := range dirFilesChan {\n\t\tif fidr.Error == nil {\n\t\t\t// THINK: What will happen if the key is already present?\n\t\t\tfilesInDirMap[fidr.DirName] = fidr.Files\n\t\t} else {\n\t\t\treturn fidr.Error\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc copyFile(sf *os.File, dst string) (int64, error) {\n\tdf, err := os.Create(dst)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer df.Close()\n\treturn io.Copy(df, sf)\n}\n\n// fileExists return flag whether a given file exists\n// and operation error if an unclassified failure occurs.\nfunc fileExists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\n// createDirectory makes directory with a given name\n// making all parent directories if necessary.\nfunc createDirectory(dirPath string) error {\n\tvar dPath string\n\tvar err error\n\tif !filepath.IsAbs(dirPath) {\n\t\tdPath, err = filepath.Abs(dirPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tdPath = dirPath\n\t}\n\texists, err := fileExists(dPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif exists {\n\t\treturn nil\n\t}\n\treturn os.MkdirAll(dPath, os.ModeDir)\n}\n\n// tryRemoveFile gives a try removing the file\n// only ignoring an error when the file does not exist.\nfunc tryRemoveFile(filePath string) (err error) {\n\terr = os.Remove(filePath)\n\tif os.IsNotExist(err) {\n\t\terr = nil\n\t\treturn\n\t}\n\treturn\n}\n\n// Unzips a specified zip file. 
Returns filename->filebytes map.\nfunc unzip(archiveName string) (map[string][]byte, error) {\n\t// Open a zip archive for reading.\n\tr, err := zip.OpenReader(archiveName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Close()\n\n\t// Files to be added to archive\n\t// map file name to contents\n\tfiles := make(map[string][]byte)\n\n\t// Iterate through the files in the archive,\n\t// printing some of their contents.\n\tfor _, f := range r.File {\n\t\trc, err := f.Open()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tbts, err := ioutil.ReadAll(rc)\n\t\trcErr := rc.Close()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif rcErr != nil {\n\t\t\treturn nil, rcErr\n\t\t}\n\n\t\tfiles[f.Name] = bts\n\t}\n\n\treturn files, nil\n}\n\n// Creates a zip file with the specified file names and byte contents.\nfunc createZip(archiveName string, files map[string][]byte) error {\n\t// Create a buffer to write our archive to.\n\tbuf := new(bytes.Buffer)\n\n\t// Create a new zip archive.\n\tw := zip.NewWriter(buf)\n\n\t// Write files\n\tfor fpath, fcont := range files {\n\t\tf, err := w.Create(fpath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = f.Write([]byte(fcont))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Make sure to check the error on Close.\n\terr := w.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(archiveName, buf.Bytes(), defaultFilePermissions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/internals_xmlnode.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"encoding/xml\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n)\n\ntype xmlNode struct {\n\tname       string\n\tattributes map[string]string\n\tchildren   []*xmlNode\n\tvalue      string\n}\n\nfunc newNode() *xmlNode {\n\tnode := new(xmlNode)\n\tnode.children = make([]*xmlNode, 0)\n\tnode.attributes = make(map[string]string)\n\treturn node\n}\n\nfunc (node *xmlNode) String() string {\n\tstr := fmt.Sprintf(\"<%s\", node.name)\n\n\tfor attrName, attrVal := range node.attributes {\n\t\tstr += fmt.Sprintf(\" %s=\\\"%s\\\"\", attrName, 
attrVal)\n\t}\n\n\tstr += \">\"\n\tstr += node.value\n\n\tif len(node.children) != 0 {\n\t\tfor _, child := range node.children {\n\t\t\tstr += fmt.Sprintf(\"%s\", child)\n\t\t}\n\t}\n\n\tstr += fmt.Sprintf(\"</%s>\", node.name)\n\n\treturn str\n}\n\nfunc (node *xmlNode) unmarshal(startEl xml.StartElement) error {\n\tnode.name = startEl.Name.Local\n\n\tfor _, v := range startEl.Attr {\n\t\t_, alreadyExists := node.attributes[v.Name.Local]\n\t\tif alreadyExists {\n\t\t\treturn errors.New(\"tag '\" + node.name + \"' has duplicated attribute: '\" + v.Name.Local + \"'\")\n\t\t}\n\t\tnode.attributes[v.Name.Local] = v.Value\n\t}\n\n\treturn nil\n}\n\nfunc (node *xmlNode) add(child *xmlNode) {\n\tif node.children == nil {\n\t\tnode.children = make([]*xmlNode, 0)\n\t}\n\n\tnode.children = append(node.children, child)\n}\n\nfunc (node *xmlNode) hasChildren() bool {\n\treturn node.children != nil && len(node.children) > 0\n}\n\n//=============================================\n\nfunc unmarshalConfig(reader io.Reader) (*xmlNode, error) {\n\txmlParser := xml.NewDecoder(reader)\n\n\tconfig, err := unmarshalNode(xmlParser, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif config == nil {\n\t\treturn nil, errors.New(\"xml has no content\")\n\t}\n\n\tnextConfigEntry, err := unmarshalNode(xmlParser, nil)\n\tif nextConfigEntry != nil {\n\t\treturn nil, errors.New(\"xml contains more than one root element\")\n\t}\n\n\treturn config, nil\n}\n\nfunc unmarshalNode(xmlParser *xml.Decoder, curToken xml.Token) (node *xmlNode, err error) {\n\tfirstLoop := true\n\tfor {\n\t\tvar tok xml.Token\n\t\tif firstLoop && curToken != nil {\n\t\t\ttok = curToken\n\t\t\tfirstLoop = false\n\t\t} else {\n\t\t\ttok, err = getNextToken(xmlParser)\n\t\t\tif err != nil || tok == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tswitch tt := tok.(type) {\n\t\tcase xml.SyntaxError:\n\t\t\terr = errors.New(tt.Error())\n\t\t\treturn\n\t\tcase xml.CharData:\n\t\t\tvalue := 
strings.TrimSpace(string([]byte(tt)))\n\t\t\tif node != nil {\n\t\t\t\tnode.value += value\n\t\t\t}\n\t\tcase xml.StartElement:\n\t\t\tif node == nil {\n\t\t\t\tnode = newNode()\n\t\t\t\terr := node.unmarshal(tt)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tchildNode, childErr := unmarshalNode(xmlParser, tok)\n\t\t\t\tif childErr != nil {\n\t\t\t\t\treturn nil, childErr\n\t\t\t\t}\n\n\t\t\t\tif childNode != nil {\n\t\t\t\t\tnode.add(childNode)\n\t\t\t\t} else {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\tcase xml.EndElement:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc getNextToken(xmlParser *xml.Decoder) (tok xml.Token, err error) {\n\tif tok, err = xmlParser.Token(); err != nil {\n\t\tif err == io.EOF {\n\t\t\terr = nil\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\treturn\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/internals_xmlnode_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\t//\"fmt\"\n\t\"reflect\"\n)\n\nvar testEnv *testing.T\n\n/*func TestWrapper(t *testing.T) {\n\ttestEnv = t\n\n\ts := \"<a d='a'><g m='a'></g><g h='t' j='kk'></g></a>\"\n\treader := strings.NewReader(s)\n\tconfig, err := unmarshalConfig(reader)\n\tif err != nil {\n\t\ttestEnv.Error(err)\n\t\treturn\n\t}\n\n\tprintXML(config, 0)\n}\n\nfunc printXML(node *xmlNode, level int) {\n\tindent := strings.Repeat(\"\\t\", level)\n\tfmt.Print(indent + node.name)\n\tfor key, value := range node.attributes {\n\t\tfmt.Print(\" 
\" + key + \"/\" + value)\n\t}\n\tfmt.Println()\n\n\tfor _, child := range node.children {\n\t\tprintXML(child, level+1)\n\t}\n}*/\n\nvar xmlNodeTests []xmlNodeTest\n\ntype xmlNodeTest struct {\n\ttestName      string\n\tinputXML      string\n\texpected      interface{}\n\terrorExpected bool\n}\n\nfunc getXMLTests() []xmlNodeTest {\n\tif xmlNodeTests == nil {\n\t\txmlNodeTests = make([]xmlNodeTest, 0)\n\n\t\ttestName := \"Simple test\"\n\t\ttestXML := `<a></a>`\n\t\ttestExpected := newNode()\n\t\ttestExpected.name = \"a\"\n\t\txmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})\n\n\t\ttestName = \"Multiline test\"\n\t\ttestXML =\n\t\t\t`\n<a>\n</a>\n`\n\t\ttestExpected = newNode()\n\t\ttestExpected.name = \"a\"\n\t\txmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})\n\n\t\ttestName = \"Multiline test #2\"\n\t\ttestXML =\n\t\t\t`\n\n\n<a>\n\n</a>\n\n`\n\t\ttestExpected = newNode()\n\t\ttestExpected.name = \"a\"\n\t\txmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})\n\n\t\ttestName = \"Incorrect names\"\n\t\ttestXML = `< a     ><      /a >`\n\t\txmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, nil, true})\n\n\t\ttestName = \"Comments\"\n\t\ttestXML =\n\t\t\t`<!-- <abcdef/> -->\n<a> <!-- <!--12345-->\n</a>\n`\n\t\ttestExpected = newNode()\n\t\ttestExpected.name = \"a\"\n\t\txmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})\n\n\t\ttestName = \"Multiple roots\"\n\t\ttestXML = `<a></a><b></b>`\n\t\txmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, nil, true})\n\n\t\ttestName = \"Multiple roots + incorrect xml\"\n\t\ttestXML = `<a></a><b>`\n\t\txmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, nil, true})\n\n\t\ttestName = \"Some unicode and data\"\n\t\ttestXML = `<俄语>данные</俄语>`\n\t\ttestExpected = newNode()\n\t\ttestExpected.name = \"俄语\"\n\t\ttestExpected.value = 
\"данные\"\n\t\txmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})\n\n\t\ttestName = \"Values and children\"\n\t\ttestXML = `<俄语>данные<and_a_child></and_a_child></俄语>`\n\t\ttestExpected = newNode()\n\t\ttestExpected.name = \"俄语\"\n\t\ttestExpected.value = \"данные\"\n\t\tchild := newNode()\n\t\tchild.name = \"and_a_child\"\n\t\ttestExpected.children = append(testExpected.children, child)\n\t\txmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})\n\n\t\ttestName = \"Just children\"\n\t\ttestXML = `<俄语><and_a_child></and_a_child></俄语>`\n\t\ttestExpected = newNode()\n\t\ttestExpected.name = \"俄语\"\n\t\tchild = newNode()\n\t\tchild.name = \"and_a_child\"\n\t\ttestExpected.children = append(testExpected.children, child)\n\t\txmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})\n\n\t\ttestName = \"Mixed test\"\n\t\ttestXML = `<俄语 a=\"1\" b=\"2.13\" c=\"abc\"><child abc=\"bca\"/><child abc=\"def\"></child></俄语>`\n\t\ttestExpected = newNode()\n\t\ttestExpected.name = \"俄语\"\n\t\ttestExpected.attributes[\"a\"] = \"1\"\n\t\ttestExpected.attributes[\"b\"] = \"2.13\"\n\t\ttestExpected.attributes[\"c\"] = \"abc\"\n\t\tchild = newNode()\n\t\tchild.name = \"child\"\n\t\tchild.attributes[\"abc\"] = \"bca\"\n\t\ttestExpected.children = append(testExpected.children, child)\n\t\tchild = newNode()\n\t\tchild.name = \"child\"\n\t\tchild.attributes[\"abc\"] = \"def\"\n\t\ttestExpected.children = append(testExpected.children, child)\n\t\txmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})\n\t}\n\n\treturn xmlNodeTests\n}\n\nfunc TestXmlNode(t *testing.T) {\n\n\tfor _, test := range getXMLTests() {\n\n\t\treader := strings.NewReader(test.inputXML)\n\t\tparsedXML, err := unmarshalConfig(reader)\n\n\t\tif (err != nil) != test.errorExpected {\n\t\t\tt.Errorf(\"\\n%s:\\nXML input: %s\\nExpected error:%t. 
Got error: %t\\n\", test.testName,\n\t\t\t\ttest.inputXML, test.errorExpected, (err != nil))\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"%s\\n\", err.Error())\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif err == nil && !reflect.DeepEqual(parsedXML, test.expected) {\n\t\t\tt.Errorf(\"\\n%s:\\nXML input: %s\\nExpected: %s. \\nGot: %s\\n\", test.testName,\n\t\t\t\ttest.inputXML, test.expected, parsedXML)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/log.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tstaticFuncCallDepth = 3 // See 'commonLogger.log' method comments\n\tloggerFuncCallDepth = 3\n)\n\n// Current is the logger used in all package level convenience funcs like 'Trace', 'Debug', 'Flush', etc.\nvar Current LoggerInterface\n\n// Default logger that is created from an empty config: \"<seelog/>\". It is not closed by a ReplaceLogger call.\nvar Default LoggerInterface\n\n// Disabled logger that doesn't produce any output in any circumstances. 
It is neither closed nor flushed by a ReplaceLogger call.\nvar Disabled LoggerInterface\n\nvar pkgOperationsMutex *sync.Mutex\n\nfunc init() {\n\tpkgOperationsMutex = new(sync.Mutex)\n\tvar err error\n\n\tif Default == nil {\n\t\tDefault, err = LoggerFromConfigAsBytes([]byte(\"<seelog />\"))\n\t}\n\n\tif Disabled == nil {\n\t\tDisabled, err = LoggerFromConfigAsBytes([]byte(\"<seelog levels=\\\"off\\\"/>\"))\n\t}\n\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Seelog couldn't start. Error: %s\", err.Error()))\n\t}\n\n\tCurrent = Default\n}\n\nfunc createLoggerFromConfig(config *logConfig) (LoggerInterface, error) {\n\tif config.LogType == syncloggerTypeFromString {\n\t\treturn newSyncLogger(config), nil\n\t} else if config.LogType == asyncLooploggerTypeFromString {\n\t\treturn newAsyncLoopLogger(config), nil\n\t} else if config.LogType == asyncTimerloggerTypeFromString {\n\t\tlogData := config.LoggerData\n\t\tif logData == nil {\n\t\t\treturn nil, errors.New(\"async timer data not set\")\n\t\t}\n\n\t\tasyncInt, ok := logData.(asyncTimerLoggerData)\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"invalid async timer data\")\n\t\t}\n\n\t\tlogger, err := newAsyncTimerLogger(config, time.Duration(asyncInt.AsyncInterval))\n\t\tif !ok {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn logger, nil\n\t} else if config.LogType == adaptiveLoggerTypeFromString {\n\t\tlogData := config.LoggerData\n\t\tif logData == nil {\n\t\t\treturn nil, errors.New(\"adaptive logger parameters not set\")\n\t\t}\n\n\t\tadaptData, ok := logData.(adaptiveLoggerData)\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"invalid adaptive logger parameters\")\n\t\t}\n\n\t\tlogger, err := newAsyncAdaptiveLogger(\n\t\t\tconfig,\n\t\t\ttime.Duration(adaptData.MinInterval),\n\t\t\ttime.Duration(adaptData.MaxInterval),\n\t\t\tadaptData.CriticalMsgCount,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn logger, nil\n\t}\n\treturn nil, errors.New(\"invalid config log type/data\")\n}\n\n// UseLogger 
sets the 'Current' package level logger variable to the specified value.\n// This variable is used in all Trace/Debug/... package level convenience funcs.\n//\n// Example:\n//\n// after calling\n//     seelog.UseLogger(somelogger)\n// the following:\n//     seelog.Debug(\"abc\")\n// will be equal to\n//     somelogger.Debug(\"abc\")\n//\n// IMPORTANT: UseLogger do NOT close the previous logger (only flushes it). So if\n// you constantly use it to replace loggers and don't close them in other code, you'll\n// end up having memory leaks.\n//\n// To safely replace loggers, use ReplaceLogger.\nfunc UseLogger(logger LoggerInterface) error {\n\tif logger == nil {\n\t\treturn errors.New(\"logger can not be nil\")\n\t}\n\n\tpkgOperationsMutex.Lock()\n\tdefer pkgOperationsMutex.Unlock()\n\n\toldLogger := Current\n\tCurrent = logger\n\n\tif oldLogger != nil {\n\t\toldLogger.Flush()\n\t}\n\n\treturn nil\n}\n\n// ReplaceLogger acts as UseLogger but the logger that was previously\n// used is disposed (except Default and Disabled loggers).\n//\n// Example:\n//     import log \"github.com/cihub/seelog\"\n//\n//     func main() {\n//         logger, err := log.LoggerFromConfigAsFile(\"seelog.xml\")\n//\n//         if err != nil {\n//             panic(err)\n//         }\n//\n//         log.ReplaceLogger(logger)\n//         defer log.Flush()\n//\n//         log.Trace(\"test\")\n//         log.Debugf(\"var = %s\", \"abc\")\n//     }\nfunc ReplaceLogger(logger LoggerInterface) error {\n\tif logger == nil {\n\t\treturn errors.New(\"logger can not be nil\")\n\t}\n\n\tpkgOperationsMutex.Lock()\n\tdefer pkgOperationsMutex.Unlock()\n\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\treportInternalError(fmt.Errorf(\"recovered from panic during ReplaceLogger: %s\", err))\n\t\t}\n\t}()\n\n\tif Current == Default {\n\t\tCurrent.Flush()\n\t} else if Current != nil && !Current.Closed() && Current != Disabled {\n\t\tCurrent.Flush()\n\t\tCurrent.Close()\n\t}\n\n\tCurrent = 
logger\n\n\treturn nil\n}\n\n// Tracef formats message according to format specifier\n// and writes to default logger with log level = Trace.\nfunc Tracef(format string, params ...interface{}) {\n\tpkgOperationsMutex.Lock()\n\tdefer pkgOperationsMutex.Unlock()\n\tCurrent.traceWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params))\n}\n\n// Debugf formats message according to format specifier\n// and writes to default logger with log level = Debug.\nfunc Debugf(format string, params ...interface{}) {\n\tpkgOperationsMutex.Lock()\n\tdefer pkgOperationsMutex.Unlock()\n\tCurrent.debugWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params))\n}\n\n// Infof formats message according to format specifier\n// and writes to default logger with log level = Info.\nfunc Infof(format string, params ...interface{}) {\n\tpkgOperationsMutex.Lock()\n\tdefer pkgOperationsMutex.Unlock()\n\tCurrent.infoWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params))\n}\n\n// Warnf formats message according to format specifier and writes to default logger with log level = Warn\nfunc Warnf(format string, params ...interface{}) error {\n\tpkgOperationsMutex.Lock()\n\tdefer pkgOperationsMutex.Unlock()\n\tmessage := newLogFormattedMessage(format, params)\n\tCurrent.warnWithCallDepth(staticFuncCallDepth, message)\n\treturn errors.New(message.String())\n}\n\n// Errorf formats message according to format specifier and writes to default logger with log level = Error\nfunc Errorf(format string, params ...interface{}) error {\n\tpkgOperationsMutex.Lock()\n\tdefer pkgOperationsMutex.Unlock()\n\tmessage := newLogFormattedMessage(format, params)\n\tCurrent.errorWithCallDepth(staticFuncCallDepth, message)\n\treturn errors.New(message.String())\n}\n\n// Criticalf formats message according to format specifier and writes to default logger with log level = Critical\nfunc Criticalf(format string, params ...interface{}) error 
{\n\tpkgOperationsMutex.Lock()\n\tdefer pkgOperationsMutex.Unlock()\n\tmessage := newLogFormattedMessage(format, params)\n\tCurrent.criticalWithCallDepth(staticFuncCallDepth, message)\n\treturn errors.New(message.String())\n}\n\n// Trace formats message using the default formats for its operands and writes to default logger with log level = Trace\nfunc Trace(v ...interface{}) {\n\tpkgOperationsMutex.Lock()\n\tdefer pkgOperationsMutex.Unlock()\n\tCurrent.traceWithCallDepth(staticFuncCallDepth, newLogMessage(v))\n}\n\n// Debug formats message using the default formats for its operands and writes to default logger with log level = Debug\nfunc Debug(v ...interface{}) {\n\tpkgOperationsMutex.Lock()\n\tdefer pkgOperationsMutex.Unlock()\n\tCurrent.debugWithCallDepth(staticFuncCallDepth, newLogMessage(v))\n}\n\n// Info formats message using the default formats for its operands and writes to default logger with log level = Info\nfunc Info(v ...interface{}) {\n\tpkgOperationsMutex.Lock()\n\tdefer pkgOperationsMutex.Unlock()\n\tCurrent.infoWithCallDepth(staticFuncCallDepth, newLogMessage(v))\n}\n\n// Warn formats message using the default formats for its operands and writes to default logger with log level = Warn\nfunc Warn(v ...interface{}) error {\n\tpkgOperationsMutex.Lock()\n\tdefer pkgOperationsMutex.Unlock()\n\tmessage := newLogMessage(v)\n\tCurrent.warnWithCallDepth(staticFuncCallDepth, message)\n\treturn errors.New(message.String())\n}\n\n// Error formats message using the default formats for its operands and writes to default logger with log level = Error\nfunc Error(v ...interface{}) error {\n\tpkgOperationsMutex.Lock()\n\tdefer pkgOperationsMutex.Unlock()\n\tmessage := newLogMessage(v)\n\tCurrent.errorWithCallDepth(staticFuncCallDepth, message)\n\treturn errors.New(message.String())\n}\n\n// Critical formats message using the default formats for its operands and writes to default logger with log level = Critical\nfunc Critical(v ...interface{}) error 
{\n\tpkgOperationsMutex.Lock()\n\tdefer pkgOperationsMutex.Unlock()\n\tmessage := newLogMessage(v)\n\tCurrent.criticalWithCallDepth(staticFuncCallDepth, message)\n\treturn errors.New(message.String())\n}\n\n// Flush immediately processes all currently queued messages and all currently buffered messages.\n// It is a blocking call which returns only after the queue is empty and all the buffers are empty.\n//\n// If Flush is called for a synchronous logger (type='sync'), it only flushes buffers (e.g. '<buffered>' receivers)\n// , because there is no queue.\n//\n// Call this method when your app is going to shut down not to lose any log messages.\nfunc Flush() {\n\tpkgOperationsMutex.Lock()\n\tdefer pkgOperationsMutex.Unlock()\n\tCurrent.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/logger.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n)\n\nfunc reportInternalError(err error) {\n\tfmt.Fprintln(os.Stderr, \"Seelog error: \"+err.Error())\n}\n\n// LoggerInterface represents structs capable of logging Seelog messages\ntype LoggerInterface interface {\n\n\t// Tracef formats message according to format specifier\n\t// and writes to log with level = Trace.\n\tTracef(format string, params ...interface{})\n\n\t// Debugf formats message according to format specifier\n\t// and writes to log with level = Debug.\n\tDebugf(format string, 
params ...interface{})\n\n\t// Infof formats message according to format specifier\n\t// and writes to log with level = Info.\n\tInfof(format string, params ...interface{})\n\n\t// Warnf formats message according to format specifier\n\t// and writes to log with level = Warn.\n\tWarnf(format string, params ...interface{}) error\n\n\t// Errorf formats message according to format specifier\n\t// and writes to log with level = Error.\n\tErrorf(format string, params ...interface{}) error\n\n\t// Criticalf formats message according to format specifier\n\t// and writes to log with level = Critical.\n\tCriticalf(format string, params ...interface{}) error\n\n\t// Trace formats message using the default formats for its operands\n\t// and writes to log with level = Trace\n\tTrace(v ...interface{})\n\n\t// Debug formats message using the default formats for its operands\n\t// and writes to log with level = Debug\n\tDebug(v ...interface{})\n\n\t// Info formats message using the default formats for its operands\n\t// and writes to log with level = Info\n\tInfo(v ...interface{})\n\n\t// Warn formats message using the default formats for its operands\n\t// and writes to log with level = Warn\n\tWarn(v ...interface{}) error\n\n\t// Error formats message using the default formats for its operands\n\t// and writes to log with level = Error\n\tError(v ...interface{}) error\n\n\t// Critical formats message using the default formats for its operands\n\t// and writes to log with level = Critical\n\tCritical(v ...interface{}) error\n\n\ttraceWithCallDepth(callDepth int, message fmt.Stringer)\n\tdebugWithCallDepth(callDepth int, message fmt.Stringer)\n\tinfoWithCallDepth(callDepth int, message fmt.Stringer)\n\twarnWithCallDepth(callDepth int, message fmt.Stringer)\n\terrorWithCallDepth(callDepth int, message fmt.Stringer)\n\tcriticalWithCallDepth(callDepth int, message fmt.Stringer)\n\n\t// Close flushes all the messages in the logger and closes it. 
It cannot be used after this operation.\n\tClose()\n\n\t// Flush flushes all the messages in the logger.\n\tFlush()\n\n\t// Closed returns true if the logger was previously closed.\n\tClosed() bool\n\n\t// SetAdditionalStackDepth sets the additional number of frames to skip by runtime.Caller\n\t// when getting function information needed to print seelog format identifiers such as %Func or %File.\n\t//\n\t// This func may be used when you wrap seelog funcs and want to print caller info of you own\n\t// wrappers instead of seelog func callers. In this case you should set depth = 1. If you then\n\t// wrap your wrapper, you should set depth = 2, etc.\n\t//\n\t// NOTE: Incorrect depth value may lead to errors in runtime.Caller evaluation or incorrect\n\t// function/file names in log files. Do not use it if you are not going to wrap seelog funcs.\n\t// You may reset the value to default using a SetAdditionalStackDepth(0) call.\n\tSetAdditionalStackDepth(depth int) error\n}\n\n// innerLoggerInterface is an internal logging interface\ntype innerLoggerInterface interface {\n\tinnerLog(level LogLevel, context LogContextInterface, message fmt.Stringer)\n\tFlush()\n}\n\n// [file path][func name][level] -> [allowed]\ntype allowedContextCache map[string]map[string]map[LogLevel]bool\n\n// commonLogger contains all common data needed for logging and contains methods used to log messages.\ntype commonLogger struct {\n\tconfig        *logConfig          // Config used for logging\n\tcontextCache  allowedContextCache // Caches whether log is enabled for specific \"full path-func name-level\" sets\n\tclosed        bool                // 'true' when all writers are closed, all data is flushed, logger is unusable.\n\tm             sync.Mutex          // Mutex for main operations\n\tunusedLevels  []bool\n\tinnerLogger   innerLoggerInterface\n\taddStackDepth int // Additional stack depth needed for correct seelog caller context detection\n}\n\nfunc newCommonLogger(config *logConfig, 
internalLogger innerLoggerInterface) *commonLogger {\n\tcLogger := new(commonLogger)\n\n\tcLogger.config = config\n\tcLogger.contextCache = make(allowedContextCache)\n\tcLogger.unusedLevels = make([]bool, Off)\n\tcLogger.fillUnusedLevels()\n\tcLogger.innerLogger = internalLogger\n\n\treturn cLogger\n}\n\nfunc (cLogger *commonLogger) SetAdditionalStackDepth(depth int) error {\n\tif depth < 0 {\n\t\treturn fmt.Errorf(\"negative depth: %d\", depth)\n\t}\n\tcLogger.m.Lock()\n\tcLogger.addStackDepth = depth\n\tcLogger.m.Unlock()\n\treturn nil\n}\n\nfunc (cLogger *commonLogger) Tracef(format string, params ...interface{}) {\n\tcLogger.traceWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params))\n}\n\nfunc (cLogger *commonLogger) Debugf(format string, params ...interface{}) {\n\tcLogger.debugWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params))\n}\n\nfunc (cLogger *commonLogger) Infof(format string, params ...interface{}) {\n\tcLogger.infoWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params))\n}\n\nfunc (cLogger *commonLogger) Warnf(format string, params ...interface{}) error {\n\tmessage := newLogFormattedMessage(format, params)\n\tcLogger.warnWithCallDepth(loggerFuncCallDepth, message)\n\treturn errors.New(message.String())\n}\n\nfunc (cLogger *commonLogger) Errorf(format string, params ...interface{}) error {\n\tmessage := newLogFormattedMessage(format, params)\n\tcLogger.errorWithCallDepth(loggerFuncCallDepth, message)\n\treturn errors.New(message.String())\n}\n\nfunc (cLogger *commonLogger) Criticalf(format string, params ...interface{}) error {\n\tmessage := newLogFormattedMessage(format, params)\n\tcLogger.criticalWithCallDepth(loggerFuncCallDepth, message)\n\treturn errors.New(message.String())\n}\n\nfunc (cLogger *commonLogger) Trace(v ...interface{}) {\n\tcLogger.traceWithCallDepth(loggerFuncCallDepth, newLogMessage(v))\n}\n\nfunc (cLogger *commonLogger) Debug(v ...interface{}) 
{\n\tcLogger.debugWithCallDepth(loggerFuncCallDepth, newLogMessage(v))\n}\n\nfunc (cLogger *commonLogger) Info(v ...interface{}) {\n\tcLogger.infoWithCallDepth(loggerFuncCallDepth, newLogMessage(v))\n}\n\nfunc (cLogger *commonLogger) Warn(v ...interface{}) error {\n\tmessage := newLogMessage(v)\n\tcLogger.warnWithCallDepth(loggerFuncCallDepth, message)\n\treturn errors.New(message.String())\n}\n\nfunc (cLogger *commonLogger) Error(v ...interface{}) error {\n\tmessage := newLogMessage(v)\n\tcLogger.errorWithCallDepth(loggerFuncCallDepth, message)\n\treturn errors.New(message.String())\n}\n\nfunc (cLogger *commonLogger) Critical(v ...interface{}) error {\n\tmessage := newLogMessage(v)\n\tcLogger.criticalWithCallDepth(loggerFuncCallDepth, message)\n\treturn errors.New(message.String())\n}\n\nfunc (cLogger *commonLogger) traceWithCallDepth(callDepth int, message fmt.Stringer) {\n\tcLogger.log(TraceLvl, message, callDepth)\n}\n\nfunc (cLogger *commonLogger) debugWithCallDepth(callDepth int, message fmt.Stringer) {\n\tcLogger.log(DebugLvl, message, callDepth)\n}\n\nfunc (cLogger *commonLogger) infoWithCallDepth(callDepth int, message fmt.Stringer) {\n\tcLogger.log(InfoLvl, message, callDepth)\n}\n\nfunc (cLogger *commonLogger) warnWithCallDepth(callDepth int, message fmt.Stringer) {\n\tcLogger.log(WarnLvl, message, callDepth)\n}\n\nfunc (cLogger *commonLogger) errorWithCallDepth(callDepth int, message fmt.Stringer) {\n\tcLogger.log(ErrorLvl, message, callDepth)\n}\n\nfunc (cLogger *commonLogger) criticalWithCallDepth(callDepth int, message fmt.Stringer) {\n\tcLogger.log(CriticalLvl, message, callDepth)\n\tcLogger.innerLogger.Flush()\n}\n\nfunc (cLogger *commonLogger) Closed() bool {\n\treturn cLogger.closed\n}\n\nfunc (cLogger *commonLogger) fillUnusedLevels() {\n\tfor i := 0; i < len(cLogger.unusedLevels); i++ {\n\t\tcLogger.unusedLevels[i] = true\n\t}\n\n\tcLogger.fillUnusedLevelsByContraint(cLogger.config.Constraints)\n\n\tfor _, exception := range 
cLogger.config.Exceptions {\n\t\tcLogger.fillUnusedLevelsByContraint(exception)\n\t}\n}\n\nfunc (cLogger *commonLogger) fillUnusedLevelsByContraint(constraint logLevelConstraints) {\n\tfor i := 0; i < len(cLogger.unusedLevels); i++ {\n\t\tif constraint.IsAllowed(LogLevel(i)) {\n\t\t\tcLogger.unusedLevels[i] = false\n\t\t}\n\t}\n}\n\n// stackCallDepth is used to indicate the call depth of 'log' func.\n// This depth level is used in the runtime.Caller(...) call. See\n// common_context.go -> specificContext, extractCallerInfo for details.\nfunc (cLogger *commonLogger) log(\n\tlevel LogLevel,\n\tmessage fmt.Stringer,\n\tstackCallDepth int) {\n\tcLogger.m.Lock()\n\tdefer cLogger.m.Unlock()\n\n\tif cLogger.Closed() {\n\t\treturn\n\t}\n\n\tif cLogger.unusedLevels[level] {\n\t\treturn\n\t}\n\n\tcontext, _ := specificContext(stackCallDepth + cLogger.addStackDepth)\n\n\t// Context errors are not reported because there are situations\n\t// in which context errors are normal Seelog usage cases. For\n\t// example in executables with stripped symbols.\n\t// Error contexts are returned instead. 
See common_context.go.\n\t/*if err != nil {\n\t\treportInternalError(err)\n\t\treturn\n\t}*/\n\n\tcLogger.innerLogger.innerLog(level, context, message)\n}\n\nfunc (cLogger *commonLogger) processLogMsg(\n\tlevel LogLevel,\n\tmessage fmt.Stringer,\n\tcontext LogContextInterface) {\n\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\treportInternalError(fmt.Errorf(\"recovered from panic during message processing: %s\", err))\n\t\t}\n\t}()\n\n\tif cLogger.config.IsAllowed(level, context) {\n\t\tcLogger.config.RootDispatcher.Dispatch(message.String(), level, context, reportInternalError)\n\t}\n}\n\nfunc (cLogger *commonLogger) isAllowed(level LogLevel, context LogContextInterface) bool {\n\tfuncMap, ok := cLogger.contextCache[context.FullPath()]\n\tif !ok {\n\t\tfuncMap = make(map[string]map[LogLevel]bool, 0)\n\t\tcLogger.contextCache[context.FullPath()] = funcMap\n\t}\n\n\tlevelMap, ok := funcMap[context.Func()]\n\tif !ok {\n\t\tlevelMap = make(map[LogLevel]bool, 0)\n\t\tfuncMap[context.Func()] = levelMap\n\t}\n\n\tisAllowValue, ok := levelMap[level]\n\tif !ok {\n\t\tisAllowValue = cLogger.config.IsAllowed(level, context)\n\t\tlevelMap[level] = isAllowValue\n\t}\n\n\treturn isAllowValue\n}\n\ntype logMessage struct {\n\tparams []interface{}\n}\n\ntype logFormattedMessage struct {\n\tformat string\n\tparams []interface{}\n}\n\nfunc newLogMessage(params []interface{}) fmt.Stringer {\n\tmessage := new(logMessage)\n\n\tmessage.params = params\n\n\treturn message\n}\n\nfunc newLogFormattedMessage(format string, params []interface{}) *logFormattedMessage {\n\tmessage := new(logFormattedMessage)\n\n\tmessage.params = params\n\tmessage.format = format\n\n\treturn message\n}\n\nfunc (message *logMessage) String() string {\n\treturn fmt.Sprint(message.params...)\n}\n\nfunc (message *logFormattedMessage) String() string {\n\treturn fmt.Sprintf(message.format, message.params...)\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/writers_bufferedwriter.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n)\n\n// bufferedWriter stores data in memory and flushes it every flushPeriod or when buffer is full\ntype bufferedWriter struct {\n\tflushPeriod time.Duration // data flushes interval (in microseconds)\n\tbufferMutex *sync.Mutex   // mutex for buffer operations syncronization\n\tinnerWriter io.Writer     // inner writer\n\tbuffer      *bufio.Writer // buffered wrapper for inner writer\n\tbufferSize  int           // max size of data chunk in bytes\n}\n\n// 
newBufferedWriter creates a new buffered writer struct.\n// bufferSize -- size of memory buffer in bytes\n// flushPeriod -- period in which data flushes from memory buffer in milliseconds. 0 - turn off this functionality\nfunc newBufferedWriter(innerWriter io.Writer, bufferSize int, flushPeriod time.Duration) (*bufferedWriter, error) {\n\n\tif innerWriter == nil {\n\t\treturn nil, errors.New(\"argument is nil: innerWriter\")\n\t}\n\tif flushPeriod < 0 {\n\t\treturn nil, fmt.Errorf(\"flushPeriod can not be less than 0. Got: %d\", flushPeriod)\n\t}\n\n\tif bufferSize <= 0 {\n\t\treturn nil, fmt.Errorf(\"bufferSize can not be less or equal to 0. Got: %d\", bufferSize)\n\t}\n\n\tbuffer := bufio.NewWriterSize(innerWriter, bufferSize)\n\n\t/*if err != nil {\n\t\treturn nil, err\n\t}*/\n\n\tnewWriter := new(bufferedWriter)\n\n\tnewWriter.innerWriter = innerWriter\n\tnewWriter.buffer = buffer\n\tnewWriter.bufferSize = bufferSize\n\tnewWriter.flushPeriod = flushPeriod * 1e6\n\tnewWriter.bufferMutex = new(sync.Mutex)\n\n\tif flushPeriod != 0 {\n\t\tgo newWriter.flushPeriodically()\n\t}\n\n\treturn newWriter, nil\n}\n\nfunc (bufWriter *bufferedWriter) writeBigChunk(bytes []byte) (n int, err error) {\n\tbufferedLen := bufWriter.buffer.Buffered()\n\n\tn, err = bufWriter.flushInner()\n\tif err != nil {\n\t\treturn\n\t}\n\n\twritten, writeErr := bufWriter.innerWriter.Write(bytes)\n\treturn bufferedLen + written, writeErr\n}\n\n// Sends data to buffer manager. 
Waits until all buffers are full.\nfunc (bufWriter *bufferedWriter) Write(bytes []byte) (n int, err error) {\n\n\tbufWriter.bufferMutex.Lock()\n\tdefer bufWriter.bufferMutex.Unlock()\n\n\tbytesLen := len(bytes)\n\n\tif bytesLen > bufWriter.bufferSize {\n\t\treturn bufWriter.writeBigChunk(bytes)\n\t}\n\n\tif bytesLen > bufWriter.buffer.Available() {\n\t\tn, err = bufWriter.flushInner()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tbufWriter.buffer.Write(bytes)\n\n\treturn len(bytes), nil\n}\n\nfunc (bufWriter *bufferedWriter) Close() error {\n\tcloser, ok := bufWriter.innerWriter.(io.Closer)\n\tif ok {\n\t\treturn closer.Close()\n\t}\n\n\treturn nil\n}\n\nfunc (bufWriter *bufferedWriter) Flush() {\n\n\tbufWriter.bufferMutex.Lock()\n\tdefer bufWriter.bufferMutex.Unlock()\n\n\tbufWriter.flushInner()\n}\n\nfunc (bufWriter *bufferedWriter) flushInner() (n int, err error) {\n\tbufferedLen := bufWriter.buffer.Buffered()\n\tflushErr := bufWriter.buffer.Flush()\n\n\treturn bufWriter.buffer.Buffered() - bufferedLen, flushErr\n}\n\nfunc (bufWriter *bufferedWriter) flushBuffer() {\n\tbufWriter.bufferMutex.Lock()\n\tdefer bufWriter.bufferMutex.Unlock()\n\n\tbufWriter.buffer.Flush()\n}\n\nfunc (bufWriter *bufferedWriter) flushPeriodically() {\n\tif bufWriter.flushPeriod > 0 {\n\t\tticker := time.NewTicker(bufWriter.flushPeriod)\n\t\tfor {\n\t\t\t<-ticker.C\n\t\t\tbufWriter.flushBuffer()\n\t\t}\n\t}\n}\n\nfunc (bufWriter *bufferedWriter) String() string {\n\treturn fmt.Sprintf(\"bufferedWriter size: %d, flushPeriod: %d\", bufWriter.bufferSize, bufWriter.flushPeriod)\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/writers_bufferedwriter_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"testing\"\n)\n\nfunc TestChunkWriteOnFilling(t *testing.T) {\n\twriter, _ := newBytesVerifier(t)\n\tbufferedWriter, err := newBufferedWriter(writer, 1024, 0)\n\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected buffered writer creation error: %s\", err.Error())\n\t}\n\n\tbytes := make([]byte, 1000)\n\n\tbufferedWriter.Write(bytes)\n\twriter.ExpectBytes(bytes)\n\tbufferedWriter.Write(bytes)\n}\n\nfunc TestFlushByTimePeriod(t *testing.T) {\n\twriter, _ := newBytesVerifier(t)\n\tbufferedWriter, err := newBufferedWriter(writer, 1024, 10)\n\n\tif 
err != nil {\n\t\tt.Fatalf(\"Unexpected buffered writer creation error: %s\", err.Error())\n\t}\n\n\tbytes := []byte(\"Hello\")\n\n\tfor i := 0; i < 2; i++ {\n\t\twriter.ExpectBytes(bytes)\n\t\tbufferedWriter.Write(bytes)\n\t}\n}\n\nfunc TestBigMessageMustPassMemoryBuffer(t *testing.T) {\n\twriter, _ := newBytesVerifier(t)\n\tbufferedWriter, err := newBufferedWriter(writer, 1024, 0)\n\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected buffered writer creation error: %s\", err.Error())\n\t}\n\n\tbytes := make([]byte, 5000)\n\n\tfor i := 0; i < len(bytes); i++ {\n\t\tbytes[i] = uint8(i % 255)\n\t}\n\n\twriter.ExpectBytes(bytes)\n\tbufferedWriter.Write(bytes)\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/writers_connwriter.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n)\n\n// connWriter is used to write to a stream-oriented network connection.\ntype connWriter struct {\n\tinnerWriter    io.WriteCloser\n\treconnectOnMsg bool\n\treconnect      bool\n\tnet            string\n\taddr           string\n\tuseTLS         bool\n\tconfigTLS      *tls.Config\n}\n\n// Creates writer to the address addr on the network netName.\n// Connection will be opened on each write if reconnectOnMsg = true\nfunc newConnWriter(netName string, addr string, reconnectOnMsg bool) 
*connWriter {\n\tnewWriter := new(connWriter)\n\n\tnewWriter.net = netName\n\tnewWriter.addr = addr\n\tnewWriter.reconnectOnMsg = reconnectOnMsg\n\n\treturn newWriter\n}\n\n// Creates a writer that uses SSL/TLS\nfunc newTLSWriter(netName string, addr string, reconnectOnMsg bool, config *tls.Config) *connWriter {\n\tnewWriter := new(connWriter)\n\n\tnewWriter.net = netName\n\tnewWriter.addr = addr\n\tnewWriter.reconnectOnMsg = reconnectOnMsg\n\tnewWriter.useTLS = true\n\tnewWriter.configTLS = config\n\n\treturn newWriter\n}\n\nfunc (connWriter *connWriter) Close() error {\n\tif connWriter.innerWriter == nil {\n\t\treturn nil\n\t}\n\n\treturn connWriter.innerWriter.Close()\n}\n\nfunc (connWriter *connWriter) Write(bytes []byte) (n int, err error) {\n\tif connWriter.neededConnectOnMsg() {\n\t\terr = connWriter.connect()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tif connWriter.reconnectOnMsg {\n\t\tdefer connWriter.innerWriter.Close()\n\t}\n\n\tn, err = connWriter.innerWriter.Write(bytes)\n\tif err != nil {\n\t\tconnWriter.reconnect = true\n\t}\n\n\treturn\n}\n\nfunc (connWriter *connWriter) String() string {\n\treturn fmt.Sprintf(\"Conn writer: [%s, %s, %v]\", connWriter.net, connWriter.addr, connWriter.reconnectOnMsg)\n}\n\nfunc (connWriter *connWriter) connect() error {\n\tif connWriter.innerWriter != nil {\n\t\tconnWriter.innerWriter.Close()\n\t\tconnWriter.innerWriter = nil\n\t}\n\n\tif connWriter.useTLS {\n\t\tconn, err := tls.Dial(connWriter.net, connWriter.addr, connWriter.configTLS)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconnWriter.innerWriter = conn\n\n\t\treturn nil\n\t}\n\n\tconn, err := net.Dial(connWriter.net, connWriter.addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttcpConn, ok := conn.(*net.TCPConn)\n\tif ok {\n\t\ttcpConn.SetKeepAlive(true)\n\t}\n\n\tconnWriter.innerWriter = conn\n\n\treturn nil\n}\n\nfunc (connWriter *connWriter) neededConnectOnMsg() bool {\n\tif connWriter.reconnect {\n\t\tconnWriter.reconnect = 
false\n\t\treturn true\n\t}\n\n\tif connWriter.innerWriter == nil {\n\t\treturn true\n\t}\n\n\treturn connWriter.reconnectOnMsg\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/writers_consolewriter.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport \"fmt\"\n\n// consoleWriter is used to write to console\ntype consoleWriter struct {\n}\n\n// Creates a new console writer. 
Returns error, if the console writer couldn't be created.\nfunc newConsoleWriter() (writer *consoleWriter, err error) {\n\tnewWriter := new(consoleWriter)\n\n\treturn newWriter, nil\n}\n\n// Create folder and file on WriteLog/Write first call\nfunc (console *consoleWriter) Write(bytes []byte) (int, error) {\n\treturn fmt.Print(string(bytes))\n}\n\nfunc (console *consoleWriter) String() string {\n\treturn \"Console writer\"\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/writers_filewriter.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n)\n\n// fileWriter is used to write to a file.\ntype fileWriter struct {\n\tinnerWriter io.WriteCloser\n\tfileName    string\n}\n\n// Creates a new file and a corresponding writer. 
Returns error, if the file couldn't be created.\nfunc newFileWriter(fileName string) (writer *fileWriter, err error) {\n\tnewWriter := new(fileWriter)\n\tnewWriter.fileName = fileName\n\n\treturn newWriter, nil\n}\n\nfunc (fw *fileWriter) Close() error {\n\tif fw.innerWriter != nil {\n\t\terr := fw.innerWriter.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfw.innerWriter = nil\n\t}\n\treturn nil\n}\n\n// Create folder and file on WriteLog/Write first call\nfunc (fw *fileWriter) Write(bytes []byte) (n int, err error) {\n\tif fw.innerWriter == nil {\n\t\tif err := fw.createFile(); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn fw.innerWriter.Write(bytes)\n}\n\nfunc (fw *fileWriter) createFile() error {\n\tfolder, _ := filepath.Split(fw.fileName)\n\tvar err error\n\n\tif 0 != len(folder) {\n\t\terr = os.MkdirAll(folder, defaultDirectoryPermissions)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// If exists\n\tfw.innerWriter, err = os.OpenFile(fw.fileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, defaultFilePermissions)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (fw *fileWriter) String() string {\n\treturn fmt.Sprintf(\"File writer: %s\", fw.fileName)\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/writers_filewriter_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"testing\"\n)\n\nconst (\n\tmessageLen = 10\n)\n\nvar bytesFileTest = []byte(strings.Repeat(\"A\", messageLen))\n\nfunc TestSimpleFileWriter(t *testing.T) {\n\tt.Logf(\"Starting file writer tests\")\n\tnewFileWriterTester(simplefileWriterTests, simplefileWriterGetter, t).test()\n}\n\n//===============================================================\n\nfunc simplefileWriterGetter(testCase *fileWriterTestCase) (io.WriteCloser, error) {\n\treturn 
newFileWriter(testCase.fileName)\n}\n\n//===============================================================\ntype fileWriterTestCase struct {\n\tfiles       []string\n\tfileName    string\n\trollingType rollingType\n\tfileSize    int64\n\tmaxRolls    int\n\tdatePattern string\n\twriteCount  int\n\tresFiles    []string\n\tnameMode    rollingNameMode\n}\n\nfunc createSimplefileWriterTestCase(fileName string, writeCount int) *fileWriterTestCase {\n\treturn &fileWriterTestCase{[]string{}, fileName, rollingTypeSize, 0, 0, \"\", writeCount, []string{fileName}, 0}\n}\n\nvar simplefileWriterTests = []*fileWriterTestCase{\n\tcreateSimplefileWriterTestCase(\"log.testlog\", 1),\n\tcreateSimplefileWriterTestCase(\"log.testlog\", 50),\n\tcreateSimplefileWriterTestCase(filepath.Join(\"dir\", \"log.testlog\"), 50),\n}\n\n//===============================================================\n\ntype fileWriterTester struct {\n\ttestCases    []*fileWriterTestCase\n\twriterGetter func(*fileWriterTestCase) (io.WriteCloser, error)\n\tt            *testing.T\n}\n\nfunc newFileWriterTester(\n\ttestCases []*fileWriterTestCase,\n\twriterGetter func(*fileWriterTestCase) (io.WriteCloser, error),\n\tt *testing.T) *fileWriterTester {\n\n\treturn &fileWriterTester{testCases, writerGetter, t}\n}\n\nfunc isWriterTestFile(fn string) bool {\n\treturn strings.Contains(fn, \".testlog\")\n}\n\nfunc cleanupWriterTest(t *testing.T) {\n\ttoDel, err := getDirFilePaths(\".\", isWriterTestFile, true)\n\tif nil != err {\n\t\tt.Fatal(\"Cannot list files in test directory!\")\n\t}\n\n\tfor _, p := range toDel {\n\t\tif err = tryRemoveFile(p); nil != err {\n\t\t\tt.Errorf(\"cannot remove file %s in test directory: %s\", p, err.Error())\n\t\t}\n\t}\n\n\tif err = os.RemoveAll(\"dir\"); nil != err {\n\t\tt.Errorf(\"cannot remove temp test directory: %s\", err.Error())\n\t}\n}\n\nfunc getWriterTestResultFiles() ([]string, error) {\n\tvar p []string\n\n\tvisit := func(path string, f os.FileInfo, err error) error {\n\t\tif 
!f.IsDir() && isWriterTestFile(path) {\n\t\t\tabs, err := filepath.Abs(path)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"filepath.Abs failed for %s\", path)\n\t\t\t}\n\n\t\t\tp = append(p, abs)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\terr := filepath.Walk(\".\", visit)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\n\treturn p, nil\n}\n\nfunc (tester *fileWriterTester) testCase(testCase *fileWriterTestCase, testNum int) {\n\tdefer cleanupWriterTest(tester.t)\n\n\ttester.t.Logf(\"Start test  [%v]\\n\", testNum)\n\n\tfor _, filePath := range testCase.files {\n\t\tdir, _ := filepath.Split(filePath)\n\n\t\tvar err error\n\n\t\tif 0 != len(dir) {\n\t\t\terr = os.MkdirAll(dir, defaultDirectoryPermissions)\n\t\t\tif err != nil {\n\t\t\t\ttester.t.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tfi, err := os.Create(filePath)\n\t\tif err != nil {\n\t\t\ttester.t.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\terr = fi.Close()\n\t\tif err != nil {\n\t\t\ttester.t.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tfwc, err := tester.writerGetter(testCase)\n\tif err != nil {\n\t\ttester.t.Error(err)\n\t\treturn\n\t}\n\tdefer fwc.Close()\n\n\ttester.performWrite(fwc, testCase.writeCount)\n\n\tfiles, err := getWriterTestResultFiles()\n\tif err != nil {\n\t\ttester.t.Error(err)\n\t\treturn\n\t}\n\n\ttester.checkRequiredFilesExist(testCase, files)\n\ttester.checkJustRequiredFilesExist(testCase, files)\n\n}\n\nfunc (tester *fileWriterTester) test() {\n\tfor i, tc := range tester.testCases {\n\t\tcleanupWriterTest(tester.t)\n\t\ttester.testCase(tc, i)\n\t}\n}\n\nfunc (tester *fileWriterTester) performWrite(fileWriter io.Writer, count int) {\n\tfor i := 0; i < count; i++ {\n\t\t_, err := fileWriter.Write(bytesFileTest)\n\n\t\tif err != nil {\n\t\t\ttester.t.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (tester *fileWriterTester) checkRequiredFilesExist(testCase *fileWriterTestCase, files []string) {\n\tvar found bool\n\tfor _, expected := range testCase.resFiles {\n\t\tfound = false\n\t\texAbs, 
err := filepath.Abs(expected)\n\t\tif err != nil {\n\t\t\ttester.t.Errorf(\"filepath.Abs failed for %s\", expected)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, f := range files {\n\t\t\tif af, e := filepath.Abs(f); e == nil {\n\t\t\t\ttester.t.Log(af)\n\t\t\t\tif exAbs == af {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ttester.t.Errorf(\"filepath.Abs failed for %s\", f)\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\ttester.t.Errorf(\"expected file: %s doesn't exist. Got %v\\n\", exAbs, files)\n\t\t}\n\t}\n}\n\nfunc (tester *fileWriterTester) checkJustRequiredFilesExist(testCase *fileWriterTestCase, files []string) {\n\tfor _, f := range files {\n\t\tfound := false\n\t\tfor _, expected := range testCase.resFiles {\n\n\t\t\texAbs, err := filepath.Abs(expected)\n\t\t\tif err != nil {\n\t\t\t\ttester.t.Errorf(\"filepath.Abs failed for %s\", expected)\n\t\t\t} else {\n\t\t\t\tif exAbs == f {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\ttester.t.Errorf(\"unexpected file: %v\", f)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/writers_formattedwriter.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n)\n\ntype formattedWriter struct {\n\twriter    io.Writer\n\tformatter *formatter\n}\n\nfunc newFormattedWriter(writer io.Writer, formatter *formatter) (*formattedWriter, error) {\n\tif formatter == nil {\n\t\treturn nil, errors.New(\"formatter can not be nil\")\n\t}\n\n\treturn &formattedWriter{writer, formatter}, nil\n}\n\nfunc (formattedWriter *formattedWriter) Write(message string, level LogLevel, context LogContextInterface) error {\n\tstr := formattedWriter.formatter.Format(message, level, 
context)\n\t_, err := formattedWriter.writer.Write([]byte(str))\n\treturn err\n}\n\nfunc (formattedWriter *formattedWriter) String() string {\n\treturn fmt.Sprintf(\"writer: %s, format: %s\", formattedWriter.writer, formattedWriter.formatter)\n}\n\nfunc (formattedWriter *formattedWriter) Writer() io.Writer {\n\treturn formattedWriter.writer\n}\n\nfunc (formattedWriter *formattedWriter) Format() *formatter {\n\treturn formattedWriter.formatter\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/writers_formattedwriter_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"testing\"\n)\n\nfunc TestformattedWriter(t *testing.T) {\n\tformatStr := \"%Level %LEVEL %Msg\"\n\tmessage := \"message\"\n\tvar logLevel = LogLevel(TraceLvl)\n\n\tbytesVerifier, err := newBytesVerifier(t)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tformatter, err := newFormatter(formatStr)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\twriter, err := newFormattedWriter(bytesVerifier, formatter)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tcontext, err := currentContext()\n\tif err != nil 
{\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tlogMessage := formatter.Format(message, logLevel, context)\n\n\tbytesVerifier.ExpectBytes([]byte(logMessage))\n\twriter.Write(message, logLevel, context)\n\tbytesVerifier.MustNotExpect()\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/writers_rollingfilewriter.go",
    "content": "// Copyright (c) 2013 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n// Common constants\nconst (\n\trollingLogHistoryDelimiter = \".\"\n)\n\n// Types of the rolling writer: roll by date, by time, etc.\ntype rollingType uint8\n\nconst (\n\trollingTypeSize = iota\n\trollingTypeTime\n)\n\n// Types of the rolled file naming mode: prefix, postfix, etc.\ntype rollingNameMode uint8\n\nconst (\n\trollingNameModePostfix = iota\n\trollingNameModePrefix\n)\n\nvar rollingNameModesStringRepresentation = 
map[rollingNameMode]string{\n\trollingNameModePostfix: \"postfix\",\n\trollingNameModePrefix:  \"prefix\",\n}\n\nfunc rollingNameModeFromString(rollingNameStr string) (rollingNameMode, bool) {\n\tfor tp, tpStr := range rollingNameModesStringRepresentation {\n\t\tif tpStr == rollingNameStr {\n\t\t\treturn tp, true\n\t\t}\n\t}\n\n\treturn 0, false\n}\n\ntype rollingIntervalType uint8\n\nconst (\n\trollingIntervalAny = iota\n\trollingIntervalDaily\n)\n\nvar rollingInvervalTypesStringRepresentation = map[rollingIntervalType]string{\n\trollingIntervalDaily: \"daily\",\n}\n\nfunc rollingIntervalTypeFromString(rollingTypeStr string) (rollingIntervalType, bool) {\n\tfor tp, tpStr := range rollingInvervalTypesStringRepresentation {\n\t\tif tpStr == rollingTypeStr {\n\t\t\treturn tp, true\n\t\t}\n\t}\n\n\treturn 0, false\n}\n\nvar rollingTypesStringRepresentation = map[rollingType]string{\n\trollingTypeSize: \"size\",\n\trollingTypeTime: \"date\",\n}\n\nfunc rollingTypeFromString(rollingTypeStr string) (rollingType, bool) {\n\tfor tp, tpStr := range rollingTypesStringRepresentation {\n\t\tif tpStr == rollingTypeStr {\n\t\t\treturn tp, true\n\t\t}\n\t}\n\n\treturn 0, false\n}\n\n// Old logs archivation type.\ntype rollingArchiveType uint8\n\nconst (\n\trollingArchiveNone = iota\n\trollingArchiveZip\n)\n\nvar rollingArchiveTypesStringRepresentation = map[rollingArchiveType]string{\n\trollingArchiveNone: \"none\",\n\trollingArchiveZip:  \"zip\",\n}\n\nfunc rollingArchiveTypeFromString(rollingArchiveTypeStr string) (rollingArchiveType, bool) {\n\tfor tp, tpStr := range rollingArchiveTypesStringRepresentation {\n\t\tif tpStr == rollingArchiveTypeStr {\n\t\t\treturn tp, true\n\t\t}\n\t}\n\n\treturn 0, false\n}\n\n// Default names for different archivation types\nvar rollingArchiveTypesDefaultNames = map[rollingArchiveType]string{\n\trollingArchiveZip: \"log.zip\",\n}\n\n// rollerVirtual is an interface that represents all virtual funcs that are\n// called in different rolling 
writer subtypes.\ntype rollerVirtual interface {\n\tneedsToRoll() (bool, error)                         // Returns true if needs to switch to another file.\n\tisFileRollNameValid(rname string) bool              // Returns true if logger roll file name (postfix/prefix/etc.) is ok.\n\tsortFileRollNamesAsc(fs []string) ([]string, error) // Sorts logger roll file names in ascending order of their creation by logger.\n\n\t// Creates a new froll history file using the contents of current file and special filename of the latest roll (prefix/ postfix).\n\t// If lastRollName is empty (\"\"), then it means that there is no latest roll (current is the first one)\n\tgetNewHistoryRollFileName(lastRollName string) string\n\tgetCurrentModifiedFileName(originalFileName string) string // Returns filename modified according to specific logger rules\n}\n\n// rollingFileWriter writes received messages to a file, until time interval passes\n// or file exceeds a specified limit. After that the current log file is renamed\n// and writer starts to log into a new file. You can set a limit for such renamed\n// files count, if you want, and then the rolling writer would delete older ones when\n// the files count exceed the specified limit.\ntype rollingFileWriter struct {\n\tfileName         string // current file name. 
May differ from original in date rolling loggers\n\toriginalFileName string // original one\n\tcurrentDirPath   string\n\tcurrentFile      *os.File\n\tcurrentFileSize  int64\n\trollingType      rollingType // Rolling mode (Files roll by size/date/...)\n\tarchiveType      rollingArchiveType\n\tarchivePath      string\n\tmaxRolls         int\n\tnameMode         rollingNameMode\n\tself             rollerVirtual // Used for virtual calls\n}\n\nfunc newRollingFileWriter(fpath string, rtype rollingType, atype rollingArchiveType, apath string, maxr int, namemode rollingNameMode) (*rollingFileWriter, error) {\n\trw := new(rollingFileWriter)\n\trw.currentDirPath, rw.fileName = filepath.Split(fpath)\n\tif len(rw.currentDirPath) == 0 {\n\t\trw.currentDirPath = \".\"\n\t}\n\trw.originalFileName = rw.fileName\n\n\trw.rollingType = rtype\n\trw.archiveType = atype\n\trw.archivePath = apath\n\trw.nameMode = namemode\n\trw.maxRolls = maxr\n\treturn rw, nil\n}\n\nfunc (rw *rollingFileWriter) hasRollName(file string) bool {\n\tswitch rw.nameMode {\n\tcase rollingNameModePostfix:\n\t\trname := rw.originalFileName + rollingLogHistoryDelimiter\n\t\treturn strings.HasPrefix(file, rname)\n\tcase rollingNameModePrefix:\n\t\trname := rollingLogHistoryDelimiter + rw.originalFileName\n\t\treturn strings.HasSuffix(file, rname)\n\t}\n\treturn false\n}\n\nfunc (rw *rollingFileWriter) createFullFileName(originalName, rollname string) string {\n\tswitch rw.nameMode {\n\tcase rollingNameModePostfix:\n\t\treturn originalName + rollingLogHistoryDelimiter + rollname\n\tcase rollingNameModePrefix:\n\t\treturn rollname + rollingLogHistoryDelimiter + originalName\n\t}\n\treturn \"\"\n}\n\nfunc (rw *rollingFileWriter) getSortedLogHistory() ([]string, error) {\n\tfiles, err := getDirFilePaths(rw.currentDirPath, nil, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar validRollNames []string\n\tfor _, file := range files {\n\t\tif file != rw.fileName && rw.hasRollName(file) {\n\t\t\trname := 
rw.getFileRollName(file)\n\t\t\tif rw.self.isFileRollNameValid(rname) {\n\t\t\t\tvalidRollNames = append(validRollNames, rname)\n\t\t\t}\n\t\t}\n\t}\n\tsortedTails, err := rw.self.sortFileRollNamesAsc(validRollNames)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvalidSortedFiles := make([]string, len(sortedTails))\n\tfor i, v := range sortedTails {\n\t\tvalidSortedFiles[i] = rw.createFullFileName(rw.originalFileName, v)\n\t}\n\treturn validSortedFiles, nil\n}\n\nfunc (rw *rollingFileWriter) createFileAndFolderIfNeeded() error {\n\tvar err error\n\n\tif len(rw.currentDirPath) != 0 {\n\t\terr = os.MkdirAll(rw.currentDirPath, defaultDirectoryPermissions)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\trw.fileName = rw.self.getCurrentModifiedFileName(rw.originalFileName)\n\tfilePath := filepath.Join(rw.currentDirPath, rw.fileName)\n\n\t// If exists\n\tstat, err := os.Lstat(filePath)\n\tif err == nil {\n\t\trw.currentFile, err = os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND, defaultFilePermissions)\n\n\t\tstat, err = os.Lstat(filePath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trw.currentFileSize = stat.Size()\n\t} else {\n\t\trw.currentFile, err = os.Create(filePath)\n\t\trw.currentFileSize = 0\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (rw *rollingFileWriter) deleteOldRolls(history []string) error {\n\tif rw.maxRolls <= 0 {\n\t\treturn nil\n\t}\n\n\trollsToDelete := len(history) - rw.maxRolls\n\tif rollsToDelete <= 0 {\n\t\treturn nil\n\t}\n\n\tswitch rw.archiveType {\n\tcase rollingArchiveZip:\n\t\tvar files map[string][]byte\n\n\t\t// If archive exists\n\t\t_, err := os.Lstat(rw.archivePath)\n\t\tif nil == err {\n\t\t\t// Extract files and content from it\n\t\t\tfiles, err = unzip(rw.archivePath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Remove the original file\n\t\t\terr = tryRemoveFile(rw.archivePath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tfiles = 
make(map[string][]byte)\n\t\t}\n\n\t\t// Add files to the existing files map, filled above\n\t\tfor i := 0; i < rollsToDelete; i++ {\n\t\t\trollPath := filepath.Join(rw.currentDirPath, history[i])\n\t\t\tbts, err := ioutil.ReadFile(rollPath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfiles[rollPath] = bts\n\t\t}\n\n\t\t// Put the final file set to zip file.\n\t\terr = createZip(rw.archivePath, files)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// In all cases (archive files or not) the files should be deleted.\n\tfor i := 0; i < rollsToDelete; i++ {\n\t\trollPath := filepath.Join(rw.currentDirPath, history[i])\n\t\terr := tryRemoveFile(rollPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (rw *rollingFileWriter) getFileRollName(fileName string) string {\n\tswitch rw.nameMode {\n\tcase rollingNameModePostfix:\n\t\treturn fileName[len(rw.originalFileName+rollingLogHistoryDelimiter):]\n\tcase rollingNameModePrefix:\n\t\treturn fileName[:len(fileName)-len(rw.originalFileName+rollingLogHistoryDelimiter)]\n\t}\n\treturn \"\"\n}\n\nfunc (rw *rollingFileWriter) Write(bytes []byte) (n int, err error) {\n\tif rw.currentFile == nil {\n\t\terr := rw.createFileAndFolderIfNeeded()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\t// needs to roll if:\n\t//   * file roller max file size exceeded OR\n\t//   * time roller interval passed\n\tnr, err := rw.self.needsToRoll()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif nr {\n\t\t// First, close current file.\n\t\terr = rw.currentFile.Close()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\t// Current history of all previous log files.\n\t\t// For file roller it may be like this:\n\t\t//     * ...\n\t\t//     * file.log.4\n\t\t//     * file.log.5\n\t\t//     * file.log.6\n\t\t//\n\t\t// For date roller it may look like this:\n\t\t//     * ...\n\t\t//     * file.log.11.Aug.13\n\t\t//     * file.log.15.Aug.13\n\t\t//     * file.log.16.Aug.13\n\t\t// Sorted 
log history does NOT include current file.\n\t\thistory, err := rw.getSortedLogHistory()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\t// Renames current file to create a new roll history entry\n\t\t// For file roller it may be like this:\n\t\t//     * ...\n\t\t//     * file.log.4\n\t\t//     * file.log.5\n\t\t//     * file.log.6\n\t\t//     n file.log.7  <---- RENAMED (from file.log)\n\t\t// Time rollers that doesn't modify file names (e.g. 'date' roller) skip this logic.\n\t\tvar newHistoryName string\n\t\tvar newRollMarkerName string\n\t\tif len(history) > 0 {\n\t\t\t// Create new rname name using last history file name\n\t\t\tnewRollMarkerName = rw.self.getNewHistoryRollFileName(rw.getFileRollName(history[len(history)-1]))\n\t\t} else {\n\t\t\t// Create first rname name\n\t\t\tnewRollMarkerName = rw.self.getNewHistoryRollFileName(\"\")\n\t\t}\n\n\t\tif len(newRollMarkerName) != 0 {\n\t\t\tnewHistoryName = rw.createFullFileName(rw.fileName, newRollMarkerName)\n\t\t} else {\n\t\t\tnewHistoryName = rw.fileName\n\t\t}\n\n\t\tif newHistoryName != rw.fileName {\n\t\t\terr = os.Rename(filepath.Join(rw.currentDirPath, rw.fileName), filepath.Join(rw.currentDirPath, newHistoryName))\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\n\t\t// Finally, add the newly added history file to the history archive\n\t\t// and, if after that the archive exceeds the allowed max limit, older rolls\n\t\t// must the removed/archived.\n\t\thistory = append(history, newHistoryName)\n\t\tif len(history) > rw.maxRolls {\n\t\t\terr = rw.deleteOldRolls(history)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\n\t\terr = rw.createFileAndFolderIfNeeded()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\trw.currentFileSize += int64(len(bytes))\n\treturn rw.currentFile.Write(bytes)\n}\n\nfunc (rw *rollingFileWriter) Close() error {\n\tif rw.currentFile != nil {\n\t\te := rw.currentFile.Close()\n\t\tif e != nil {\n\t\t\treturn 
e\n\t\t}\n\t\trw.currentFile = nil\n\t}\n\treturn nil\n}\n\n// =============================================================================================\n//      Different types of rolling writers\n// =============================================================================================\n\n// --------------------------------------------------\n//      Rolling writer by SIZE\n// --------------------------------------------------\n\n// rollingFileWriterSize performs roll when file exceeds a specified limit.\ntype rollingFileWriterSize struct {\n\t*rollingFileWriter\n\tmaxFileSize int64\n}\n\nfunc newRollingFileWriterSize(fpath string, atype rollingArchiveType, apath string, maxSize int64, maxRolls int, namemode rollingNameMode) (*rollingFileWriterSize, error) {\n\trw, err := newRollingFileWriter(fpath, rollingTypeSize, atype, apath, maxRolls, namemode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trws := &rollingFileWriterSize{rw, maxSize}\n\trws.self = rws\n\treturn rws, nil\n}\n\nfunc (rws *rollingFileWriterSize) needsToRoll() (bool, error) {\n\treturn rws.currentFileSize >= rws.maxFileSize, nil\n}\n\nfunc (rws *rollingFileWriterSize) isFileRollNameValid(rname string) bool {\n\tif len(rname) == 0 {\n\t\treturn false\n\t}\n\t_, err := strconv.Atoi(rname)\n\treturn err == nil\n}\n\ntype rollSizeFileTailsSlice []string\n\nfunc (p rollSizeFileTailsSlice) Len() int { return len(p) }\nfunc (p rollSizeFileTailsSlice) Less(i, j int) bool {\n\tv1, _ := strconv.Atoi(p[i])\n\tv2, _ := strconv.Atoi(p[j])\n\treturn v1 < v2\n}\nfunc (p rollSizeFileTailsSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\n\nfunc (rws *rollingFileWriterSize) sortFileRollNamesAsc(fs []string) ([]string, error) {\n\tss := rollSizeFileTailsSlice(fs)\n\tsort.Sort(ss)\n\treturn ss, nil\n}\n\nfunc (rws *rollingFileWriterSize) getNewHistoryRollFileName(lastRollName string) string {\n\tv := 0\n\tif len(lastRollName) != 0 {\n\t\tv, _ = strconv.Atoi(lastRollName)\n\t}\n\treturn 
fmt.Sprintf(\"%d\", v+1)\n}\n\nfunc (rws *rollingFileWriterSize) getCurrentModifiedFileName(originalFileName string) string {\n\treturn originalFileName\n}\n\nfunc (rws *rollingFileWriterSize) String() string {\n\treturn fmt.Sprintf(\"Rolling file writer (By SIZE): filename: %s, archive: %s, archivefile: %s, maxFileSize: %v, maxRolls: %v\",\n\t\trws.fileName,\n\t\trollingArchiveTypesStringRepresentation[rws.archiveType],\n\t\trws.archivePath,\n\t\trws.maxFileSize,\n\t\trws.maxRolls)\n}\n\n// --------------------------------------------------\n//      Rolling writer by TIME\n// --------------------------------------------------\n\n// rollingFileWriterTime performs roll when a specified time interval has passed.\ntype rollingFileWriterTime struct {\n\t*rollingFileWriter\n\ttimePattern         string\n\tinterval            rollingIntervalType\n\tcurrentTimeFileName string\n}\n\nfunc newRollingFileWriterTime(fpath string, atype rollingArchiveType, apath string, maxr int,\n\ttimePattern string, interval rollingIntervalType, namemode rollingNameMode) (*rollingFileWriterTime, error) {\n\n\trw, err := newRollingFileWriter(fpath, rollingTypeTime, atype, apath, maxr, namemode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trws := &rollingFileWriterTime{rw, timePattern, interval, \"\"}\n\trws.self = rws\n\treturn rws, nil\n}\n\nfunc (rwt *rollingFileWriterTime) needsToRoll() (bool, error) {\n\tswitch rwt.nameMode {\n\tcase rollingNameModePostfix:\n\t\tif rwt.originalFileName+rollingLogHistoryDelimiter+time.Now().Format(rwt.timePattern) == rwt.fileName {\n\t\t\treturn false, nil\n\t\t}\n\tcase rollingNameModePrefix:\n\t\tif time.Now().Format(rwt.timePattern)+rollingLogHistoryDelimiter+rwt.originalFileName == rwt.fileName {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\tif rwt.interval == rollingIntervalAny {\n\t\treturn true, nil\n\t}\n\n\ttprev, err := time.ParseInLocation(rwt.timePattern, rwt.getFileRollName(rwt.fileName), time.Local)\n\tif err != nil {\n\t\treturn false, 
err\n\t}\n\n\tdiff := time.Now().Sub(tprev)\n\tswitch rwt.interval {\n\tcase rollingIntervalDaily:\n\t\treturn diff >= 24*time.Hour, nil\n\t}\n\treturn false, fmt.Errorf(\"unknown interval type: %d\", rwt.interval)\n}\n\nfunc (rwt *rollingFileWriterTime) isFileRollNameValid(rname string) bool {\n\tif len(rname) == 0 {\n\t\treturn false\n\t}\n\t_, err := time.ParseInLocation(rwt.timePattern, rname, time.Local)\n\treturn err == nil\n}\n\ntype rollTimeFileTailsSlice struct {\n\tdata    []string\n\tpattern string\n}\n\nfunc (p rollTimeFileTailsSlice) Len() int { return len(p.data) }\nfunc (p rollTimeFileTailsSlice) Less(i, j int) bool {\n\tt1, _ := time.ParseInLocation(p.pattern, p.data[i], time.Local)\n\tt2, _ := time.ParseInLocation(p.pattern, p.data[j], time.Local)\n\treturn t1.Before(t2)\n}\nfunc (p rollTimeFileTailsSlice) Swap(i, j int) { p.data[i], p.data[j] = p.data[j], p.data[i] }\n\nfunc (rwt *rollingFileWriterTime) sortFileRollNamesAsc(fs []string) ([]string, error) {\n\tss := rollTimeFileTailsSlice{data: fs, pattern: rwt.timePattern}\n\tsort.Sort(ss)\n\treturn ss.data, nil\n}\n\nfunc (rwt *rollingFileWriterTime) getNewHistoryRollFileName(lastRollName string) string {\n\treturn \"\"\n}\n\nfunc (rwt *rollingFileWriterTime) getCurrentModifiedFileName(originalFileName string) string {\n\tswitch rwt.nameMode {\n\tcase rollingNameModePostfix:\n\t\treturn originalFileName + rollingLogHistoryDelimiter + time.Now().Format(rwt.timePattern)\n\tcase rollingNameModePrefix:\n\t\treturn time.Now().Format(rwt.timePattern) + rollingLogHistoryDelimiter + originalFileName\n\t}\n\treturn \"\"\n}\n\nfunc (rwt *rollingFileWriterTime) String() string {\n\treturn fmt.Sprintf(\"Rolling file writer (By TIME): filename: %s, archive: %s, archivefile: %s, maxInterval: %v, pattern: %s, maxRolls: %v\",\n\t\trwt.fileName,\n\t\trollingArchiveTypesStringRepresentation[rwt.archiveType],\n\t\trwt.archivePath,\n\t\trwt.interval,\n\t\trwt.timePattern,\n\t\trwt.maxRolls)\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/writers_rollingfilewriter_test.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"testing\"\n)\n\n// fileWriterTestCase is declared in writers_filewriter_test.go\n\nfunc createRollingSizeFileWriterTestCase(\n\tfiles []string,\n\tfileName string,\n\tfileSize int64,\n\tmaxRolls int,\n\twriteCount int,\n\tresFiles []string,\n\tnameMode rollingNameMode) *fileWriterTestCase {\n\n\treturn &fileWriterTestCase{files, fileName, rollingTypeSize, fileSize, maxRolls, \"\", writeCount, resFiles, nameMode}\n}\n\nfunc createRollingDatefileWriterTestCase(\n\tfiles []string,\n\tfileName string,\n\tdatePattern 
string,\n\twriteCount int,\n\tresFiles []string,\n\tnameMode rollingNameMode) *fileWriterTestCase {\n\n\treturn &fileWriterTestCase{files, fileName, rollingTypeTime, 0, 0, datePattern, writeCount, resFiles, nameMode}\n}\n\nfunc TestRollingFileWriter(t *testing.T) {\n\tt.Logf(\"Starting rolling file writer tests\")\n\tnewFileWriterTester(rollingfileWriterTests, rollingFileWriterGetter, t).test()\n}\n\n//===============================================================\n\nfunc rollingFileWriterGetter(testCase *fileWriterTestCase) (io.WriteCloser, error) {\n\tif testCase.rollingType == rollingTypeSize {\n\t\treturn newRollingFileWriterSize(testCase.fileName, rollingArchiveNone, \"\", testCase.fileSize, testCase.maxRolls, testCase.nameMode)\n\t} else if testCase.rollingType == rollingTypeTime {\n\t\treturn newRollingFileWriterTime(testCase.fileName, rollingArchiveNone, \"\", -1, testCase.datePattern, rollingIntervalDaily, testCase.nameMode)\n\t}\n\n\treturn nil, fmt.Errorf(\"incorrect rollingType\")\n}\n\n//===============================================================\nvar rollingfileWriterTests = []*fileWriterTestCase{\n\tcreateRollingSizeFileWriterTestCase([]string{}, \"log.testlog\", 10, 10, 1, []string{\"log.testlog\"}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{}, \"log.testlog\", 10, 10, 2, []string{\"log.testlog\", \"log.testlog.1\"}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{\"1.log.testlog\"}, \"log.testlog\", 10, 10, 2, []string{\"log.testlog\", \"1.log.testlog\", \"2.log.testlog\"}, rollingNameModePrefix),\n\tcreateRollingSizeFileWriterTestCase([]string{\"log.testlog.1\"}, \"log.testlog\", 10, 1, 2, []string{\"log.testlog\", \"log.testlog.2\"}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{}, \"log.testlog\", 10, 1, 2, []string{\"log.testlog\", \"log.testlog.1\"}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{\"log.testlog.9\"}, \"log.testlog\", 
10, 1, 2, []string{\"log.testlog\", \"log.testlog.10\"}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{\"log.testlog.a\", \"log.testlog.1b\"}, \"log.testlog\", 10, 1, 2, []string{\"log.testlog\", \"log.testlog.1\", \"log.testlog.a\", \"log.testlog.1b\"}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{}, `dir/log.testlog`, 10, 10, 1, []string{`dir/log.testlog`}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{}, `dir/log.testlog`, 10, 10, 2, []string{`dir/log.testlog`, `dir/1.log.testlog`}, rollingNameModePrefix),\n\tcreateRollingSizeFileWriterTestCase([]string{`dir/dir/log.testlog.1`}, `dir/dir/log.testlog`, 10, 10, 2, []string{`dir/dir/log.testlog`, `dir/dir/log.testlog.1`, `dir/dir/log.testlog.2`}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{`dir/dir/dir/log.testlog.1`}, `dir/dir/dir/log.testlog`, 10, 1, 2, []string{`dir/dir/dir/log.testlog`, `dir/dir/dir/log.testlog.2`}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{}, `./log.testlog`, 10, 1, 2, []string{`log.testlog`, `log.testlog.1`}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{`././././log.testlog.9`}, `log.testlog`, 10, 1, 2, []string{`log.testlog`, `log.testlog.10`}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{\"dir/dir/log.testlog.a\", \"dir/dir/log.testlog.1b\"}, \"dir/dir/log.testlog\", 10, 1, 2, []string{\"dir/dir/log.testlog\", \"dir/dir/log.testlog.1\", \"dir/dir/log.testlog.a\", \"dir/dir/log.testlog.1b\"}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{}, `././dir/log.testlog`, 10, 10, 1, []string{`dir/log.testlog`}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{}, `././dir/log.testlog`, 10, 10, 2, []string{`dir/log.testlog`, `dir/log.testlog.1`}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{`././dir/dir/log.testlog.1`}, 
`dir/dir/log.testlog`, 10, 10, 2, []string{`dir/dir/log.testlog`, `dir/dir/log.testlog.1`, `dir/dir/log.testlog.2`}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{`././dir/dir/dir/log.testlog.1`}, `dir/dir/dir/log.testlog`, 10, 1, 2, []string{`dir/dir/dir/log.testlog`, `dir/dir/dir/log.testlog.2`}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{}, `././log.testlog`, 10, 1, 2, []string{`log.testlog`, `log.testlog.1`}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{`././././log.testlog.9`}, `log.testlog`, 10, 1, 2, []string{`log.testlog`, `log.testlog.10`}, rollingNameModePostfix),\n\tcreateRollingSizeFileWriterTestCase([]string{\"././dir/dir/log.testlog.a\", \"././dir/dir/log.testlog.1b\"}, \"dir/dir/log.testlog\", 10, 1, 2, []string{\"dir/dir/log.testlog\", \"dir/dir/log.testlog.1\", \"dir/dir/log.testlog.a\", \"dir/dir/log.testlog.1b\"}, rollingNameModePostfix),\n\t// ====================\n}\n"
  },
  {
    "path": "vendor/github.com/cihub/seelog/writers_smtpwriter.go",
    "content": "// Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n//\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice, this\n//    list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/smtp\"\n\t\"path/filepath\"\n\t\"strings\"\n)\n\nconst (\n\t// Default subject phrase for sending emails.\n\tDefaultSubjectPhrase = \"Diagnostic message from server: \"\n\n\t// Message subject pattern composed according to RFC 5321.\n\trfc5321SubjectPattern = \"From: %s <%s>\\nSubject: %s\\n\"\n)\n\n// smtpWriter is used to send emails via given SMTP-server.\ntype smtpWriter struct {\n\tauth               smtp.Auth\n\thostName           string\n\thostPort   
        string\n\thostNameWithPort   string\n\tsenderAddress      string\n\tsenderName         string\n\trecipientAddresses []string\n\tcaCertDirPaths     []string\n\tmailHeaders        []string\n\tsubject            string\n}\n\n// newSMTPWriter returns a new SMTP-writer.\nfunc newSMTPWriter(sa, sn string, ras []string, hn, hp, un, pwd string, cacdps []string, subj string, headers []string) *smtpWriter {\n\treturn &smtpWriter{\n\t\tauth:               smtp.PlainAuth(\"\", un, pwd, hn),\n\t\thostName:           hn,\n\t\thostPort:           hp,\n\t\thostNameWithPort:   fmt.Sprintf(\"%s:%s\", hn, hp),\n\t\tsenderAddress:      sa,\n\t\tsenderName:         sn,\n\t\trecipientAddresses: ras,\n\t\tcaCertDirPaths:     cacdps,\n\t\tsubject:            subj,\n\t\tmailHeaders:        headers,\n\t}\n}\n\nfunc prepareMessage(senderAddr, senderName, subject string, body []byte, headers []string) []byte {\n\theaderLines := fmt.Sprintf(rfc5321SubjectPattern, senderName, senderAddr, subject);\n\n\t// Build header lines if configured.\n\tif headers != nil && len(headers) > 0 {\n\t\theaderLines += strings.Join(headers, \"\\n\")\n\t\theaderLines += \"\\n\"\n\t}\n\n\treturn append([]byte(headerLines), body...)\n}\n\n// getTLSConfig gets paths of PEM files with certificates,\n// host server name and tries to create an appropriate TLS.Config.\nfunc getTLSConfig(pemFileDirPaths []string, hostName string) (config *tls.Config, err error) {\n\tif pemFileDirPaths == nil || len(pemFileDirPaths) == 0 {\n\t\terr = errors.New(\"invalid PEM file paths\")\n\t\treturn\n\t}\n\tpemEncodedContent := []byte{}\n\tvar (\n\t\te     error\n\t\tbytes []byte\n\t)\n\t// Create a file-filter-by-extension, set aside non-pem files.\n\tpemFilePathFilter := func(fp string) bool {\n\t\tif filepath.Ext(fp) == \".pem\" {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tfor _, pemFileDirPath := range pemFileDirPaths {\n\t\tpemFilePaths, err := getDirFilePaths(pemFileDirPath, pemFilePathFilter, false)\n\t\tif err 
!= nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Put together all the PEM files to decode them as a whole byte slice.\n\t\tfor _, pfp := range pemFilePaths {\n\t\t\tif bytes, e = ioutil.ReadFile(pfp); e == nil {\n\t\t\t\tpemEncodedContent = append(pemEncodedContent, bytes...)\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"cannot read file: %s: %s\", pfp, e.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\tconfig = &tls.Config{RootCAs: x509.NewCertPool(), ServerName: hostName}\n\tisAppended := config.RootCAs.AppendCertsFromPEM(pemEncodedContent)\n\tif !isAppended {\n\t\t// Extract this into a separate error.\n\t\terr = errors.New(\"invalid PEM content\")\n\t\treturn\n\t}\n\treturn\n}\n\n// SendMail accepts TLS configuration, connects to the server at addr,\n// switches to TLS if possible, authenticates with mechanism a if possible,\n// and then sends an email from address from, to addresses to, with message msg.\nfunc sendMailWithTLSConfig(config *tls.Config, addr string, a smtp.Auth, from string, to []string, msg []byte) error {\n\tc, err := smtp.Dial(addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Check if the server supports STARTTLS extension.\n\tif ok, _ := c.Extension(\"STARTTLS\"); ok {\n\t\tif err = c.StartTLS(config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// Check if the server supports AUTH extension and use given smtp.Auth.\n\tif a != nil {\n\t\tif isSupported, _ := c.Extension(\"AUTH\"); isSupported {\n\t\t\tif err = c.Auth(a); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\t// Portion of code from the official smtp.SendMail function,\n\t// see http://golang.org/src/pkg/net/smtp/smtp.go.\n\tif err = c.Mail(from); err != nil {\n\t\treturn err\n\t}\n\tfor _, addr := range to {\n\t\tif err = c.Rcpt(addr); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tw, err := c.Data()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = w.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn 
c.Quit()\n}\n\n// Write pushes a text message properly composed according to RFC 5321\n// to a post server, which sends it to the recipients.\nfunc (smtpw *smtpWriter) Write(data []byte) (int, error) {\n\tvar err error\n\n\tif smtpw.caCertDirPaths == nil {\n\t\terr = smtp.SendMail(\n\t\t\tsmtpw.hostNameWithPort,\n\t\t\tsmtpw.auth,\n\t\t\tsmtpw.senderAddress,\n\t\t\tsmtpw.recipientAddresses,\n\t\t\tprepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders),\n\t\t)\n\t} else {\n\t\tconfig, e := getTLSConfig(smtpw.caCertDirPaths, smtpw.hostName)\n\t\tif e != nil {\n\t\t\treturn 0, e\n\t\t}\n\t\terr = sendMailWithTLSConfig(\n\t\t\tconfig,\n\t\t\tsmtpw.hostNameWithPort,\n\t\t\tsmtpw.auth,\n\t\t\tsmtpw.senderAddress,\n\t\t\tsmtpw.recipientAddresses,\n\t\t\tprepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders),\n\t\t)\n\t}\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn len(data), nil\n}\n\n// Close closes down SMTP-connection.\nfunc (smtpw *smtpWriter) Close() error {\n\t// Do nothing as Write method opens and closes connection automatically\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/Makefile",
    "content": "# Go support for Protocol Buffers - Google's data interchange format\n#\n# Copyright 2010 The Go Authors.  All rights reserved.\n# https://github.com/golang/protobuf\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n#     * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#     * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n#     * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\ninstall:\n\tgo install\n\ntest: install generate-test-pbs\n\tgo test\n\n\ngenerate-test-pbs:\n\tmake install\n\tmake -C testdata\n\tprotoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto\n\tmake\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/all_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"runtime/debug\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t. \"github.com/golang/protobuf/proto\"\n\t. 
\"github.com/golang/protobuf/proto/testdata\"\n)\n\nvar globalO *Buffer\n\nfunc old() *Buffer {\n\tif globalO == nil {\n\t\tglobalO = NewBuffer(nil)\n\t}\n\tglobalO.Reset()\n\treturn globalO\n}\n\nfunc equalbytes(b1, b2 []byte, t *testing.T) {\n\tif len(b1) != len(b2) {\n\t\tt.Errorf(\"wrong lengths: 2*%d != %d\", len(b1), len(b2))\n\t\treturn\n\t}\n\tfor i := 0; i < len(b1); i++ {\n\t\tif b1[i] != b2[i] {\n\t\t\tt.Errorf(\"bad byte[%d]:%x %x: %s %s\", i, b1[i], b2[i], b1, b2)\n\t\t}\n\t}\n}\n\nfunc initGoTestField() *GoTestField {\n\tf := new(GoTestField)\n\tf.Label = String(\"label\")\n\tf.Type = String(\"type\")\n\treturn f\n}\n\n// These are all structurally equivalent but the tag numbers differ.\n// (It's remarkable that required, optional, and repeated all have\n// 8 letters.)\nfunc initGoTest_RequiredGroup() *GoTest_RequiredGroup {\n\treturn &GoTest_RequiredGroup{\n\t\tRequiredField: String(\"required\"),\n\t}\n}\n\nfunc initGoTest_OptionalGroup() *GoTest_OptionalGroup {\n\treturn &GoTest_OptionalGroup{\n\t\tRequiredField: String(\"optional\"),\n\t}\n}\n\nfunc initGoTest_RepeatedGroup() *GoTest_RepeatedGroup {\n\treturn &GoTest_RepeatedGroup{\n\t\tRequiredField: String(\"repeated\"),\n\t}\n}\n\nfunc initGoTest(setdefaults bool) *GoTest {\n\tpb := new(GoTest)\n\tif setdefaults {\n\t\tpb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted)\n\t\tpb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted)\n\t\tpb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted)\n\t\tpb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted)\n\t\tpb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted)\n\t\tpb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted)\n\t\tpb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted)\n\t\tpb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted)\n\t\tpb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted)\n\t\tpb.F_StringDefaulted = 
String(Default_GoTest_F_StringDefaulted)\n\t\tpb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted\n\t\tpb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted)\n\t\tpb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted)\n\t}\n\n\tpb.Kind = GoTest_TIME.Enum()\n\tpb.RequiredField = initGoTestField()\n\tpb.F_BoolRequired = Bool(true)\n\tpb.F_Int32Required = Int32(3)\n\tpb.F_Int64Required = Int64(6)\n\tpb.F_Fixed32Required = Uint32(32)\n\tpb.F_Fixed64Required = Uint64(64)\n\tpb.F_Uint32Required = Uint32(3232)\n\tpb.F_Uint64Required = Uint64(6464)\n\tpb.F_FloatRequired = Float32(3232)\n\tpb.F_DoubleRequired = Float64(6464)\n\tpb.F_StringRequired = String(\"string\")\n\tpb.F_BytesRequired = []byte(\"bytes\")\n\tpb.F_Sint32Required = Int32(-32)\n\tpb.F_Sint64Required = Int64(-64)\n\tpb.Requiredgroup = initGoTest_RequiredGroup()\n\n\treturn pb\n}\n\nfunc fail(msg string, b *bytes.Buffer, s string, t *testing.T) {\n\tdata := b.Bytes()\n\tld := len(data)\n\tls := len(s) / 2\n\n\tfmt.Printf(\"fail %s ld=%d ls=%d\\n\", msg, ld, ls)\n\n\t// find the interesting spot - n\n\tn := ls\n\tif ld < ls {\n\t\tn = ld\n\t}\n\tj := 0\n\tfor i := 0; i < n; i++ {\n\t\tbs := hex(s[j])*16 + hex(s[j+1])\n\t\tj += 2\n\t\tif data[i] == bs {\n\t\t\tcontinue\n\t\t}\n\t\tn = i\n\t\tbreak\n\t}\n\tl := n - 10\n\tif l < 0 {\n\t\tl = 0\n\t}\n\th := n + 10\n\n\t// find the interesting spot - n\n\tfmt.Printf(\"is[%d]:\", l)\n\tfor i := l; i < h; i++ {\n\t\tif i >= ld {\n\t\t\tfmt.Printf(\" --\")\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Printf(\" %.2x\", data[i])\n\t}\n\tfmt.Printf(\"\\n\")\n\n\tfmt.Printf(\"sb[%d]:\", l)\n\tfor i := l; i < h; i++ {\n\t\tif i >= ls {\n\t\t\tfmt.Printf(\" --\")\n\t\t\tcontinue\n\t\t}\n\t\tbs := hex(s[j])*16 + hex(s[j+1])\n\t\tj += 2\n\t\tfmt.Printf(\" %.2x\", bs)\n\t}\n\tfmt.Printf(\"\\n\")\n\n\tt.Fail()\n\n\t//\tt.Errorf(\"%s: \\ngood: %s\\nbad: %x\", msg, s, b.Bytes())\n\t// Print the output in a partially-decoded format; can\n\t// be helpful when updating the 
test.  It produces the output\n\t// that is pasted, with minor edits, into the argument to verify().\n\t//\tdata := b.Bytes()\n\t//\tnesting := 0\n\t//\tfor b.Len() > 0 {\n\t//\t\tstart := len(data) - b.Len()\n\t//\t\tvar u uint64\n\t//\t\tu, err := DecodeVarint(b)\n\t//\t\tif err != nil {\n\t//\t\t\tfmt.Printf(\"decode error on varint:\", err)\n\t//\t\t\treturn\n\t//\t\t}\n\t//\t\twire := u & 0x7\n\t//\t\ttag := u >> 3\n\t//\t\tswitch wire {\n\t//\t\tcase WireVarint:\n\t//\t\t\tv, err := DecodeVarint(b)\n\t//\t\t\tif err != nil {\n\t//\t\t\t\tfmt.Printf(\"decode error on varint:\", err)\n\t//\t\t\t\treturn\n\t//\t\t\t}\n\t//\t\t\tfmt.Printf(\"\\t\\t\\\"%x\\\"  // field %d, encoding %d, value %d\\n\",\n\t//\t\t\t\tdata[start:len(data)-b.Len()], tag, wire, v)\n\t//\t\tcase WireFixed32:\n\t//\t\t\tv, err := DecodeFixed32(b)\n\t//\t\t\tif err != nil {\n\t//\t\t\t\tfmt.Printf(\"decode error on fixed32:\", err)\n\t//\t\t\t\treturn\n\t//\t\t\t}\n\t//\t\t\tfmt.Printf(\"\\t\\t\\\"%x\\\"  // field %d, encoding %d, value %d\\n\",\n\t//\t\t\t\tdata[start:len(data)-b.Len()], tag, wire, v)\n\t//\t\tcase WireFixed64:\n\t//\t\t\tv, err := DecodeFixed64(b)\n\t//\t\t\tif err != nil {\n\t//\t\t\t\tfmt.Printf(\"decode error on fixed64:\", err)\n\t//\t\t\t\treturn\n\t//\t\t\t}\n\t//\t\t\tfmt.Printf(\"\\t\\t\\\"%x\\\"  // field %d, encoding %d, value %d\\n\",\n\t//\t\t\t\tdata[start:len(data)-b.Len()], tag, wire, v)\n\t//\t\tcase WireBytes:\n\t//\t\t\tnb, err := DecodeVarint(b)\n\t//\t\t\tif err != nil {\n\t//\t\t\t\tfmt.Printf(\"decode error on bytes:\", err)\n\t//\t\t\t\treturn\n\t//\t\t\t}\n\t//\t\t\tafter_tag := len(data) - b.Len()\n\t//\t\t\tstr := make([]byte, nb)\n\t//\t\t\t_, err = b.Read(str)\n\t//\t\t\tif err != nil {\n\t//\t\t\t\tfmt.Printf(\"decode error on bytes:\", err)\n\t//\t\t\t\treturn\n\t//\t\t\t}\n\t//\t\t\tfmt.Printf(\"\\t\\t\\\"%x\\\" \\\"%x\\\"  // field %d, encoding %d (FIELD)\\n\",\n\t//\t\t\t\tdata[start:after_tag], str, tag, wire)\n\t//\t\tcase 
WireStartGroup:\n\t//\t\t\tnesting++\n\t//\t\t\tfmt.Printf(\"\\t\\t\\\"%x\\\"\\t\\t// start group field %d level %d\\n\",\n\t//\t\t\t\tdata[start:len(data)-b.Len()], tag, nesting)\n\t//\t\tcase WireEndGroup:\n\t//\t\t\tfmt.Printf(\"\\t\\t\\\"%x\\\"\\t\\t// end group field %d level %d\\n\",\n\t//\t\t\t\tdata[start:len(data)-b.Len()], tag, nesting)\n\t//\t\t\tnesting--\n\t//\t\tdefault:\n\t//\t\t\tfmt.Printf(\"unrecognized wire type %d\\n\", wire)\n\t//\t\t\treturn\n\t//\t\t}\n\t//\t}\n}\n\nfunc hex(c uint8) uint8 {\n\tif '0' <= c && c <= '9' {\n\t\treturn c - '0'\n\t}\n\tif 'a' <= c && c <= 'f' {\n\t\treturn 10 + c - 'a'\n\t}\n\tif 'A' <= c && c <= 'F' {\n\t\treturn 10 + c - 'A'\n\t}\n\treturn 0\n}\n\nfunc equal(b []byte, s string, t *testing.T) bool {\n\tif 2*len(b) != len(s) {\n\t\t//\t\tfail(fmt.Sprintf(\"wrong lengths: 2*%d != %d\", len(b), len(s)), b, s, t)\n\t\tfmt.Printf(\"wrong lengths: 2*%d != %d\\n\", len(b), len(s))\n\t\treturn false\n\t}\n\tfor i, j := 0, 0; i < len(b); i, j = i+1, j+2 {\n\t\tx := hex(s[j])*16 + hex(s[j+1])\n\t\tif b[i] != x {\n\t\t\t//\t\t\tfail(fmt.Sprintf(\"bad byte[%d]:%x %x\", i, b[i], x), b, s, t)\n\t\t\tfmt.Printf(\"bad byte[%d]:%x %x\", i, b[i], x)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc overify(t *testing.T, pb *GoTest, expected string) {\n\to := old()\n\terr := o.Marshal(pb)\n\tif err != nil {\n\t\tfmt.Printf(\"overify marshal-1 err = %v\", err)\n\t\to.DebugPrint(\"\", o.Bytes())\n\t\tt.Fatalf(\"expected = %s\", expected)\n\t}\n\tif !equal(o.Bytes(), expected, t) {\n\t\to.DebugPrint(\"overify neq 1\", o.Bytes())\n\t\tt.Fatalf(\"expected = %s\", expected)\n\t}\n\n\t// Now test Unmarshal by recreating the original buffer.\n\tpbd := new(GoTest)\n\terr = o.Unmarshal(pbd)\n\tif err != nil {\n\t\tt.Fatalf(\"overify unmarshal err = %v\", err)\n\t\to.DebugPrint(\"\", o.Bytes())\n\t\tt.Fatalf(\"string = %s\", expected)\n\t}\n\to.Reset()\n\terr = o.Marshal(pbd)\n\tif err != nil {\n\t\tt.Errorf(\"overify marshal-2 err = 
%v\", err)\n\t\to.DebugPrint(\"\", o.Bytes())\n\t\tt.Fatalf(\"string = %s\", expected)\n\t}\n\tif !equal(o.Bytes(), expected, t) {\n\t\to.DebugPrint(\"overify neq 2\", o.Bytes())\n\t\tt.Fatalf(\"string = %s\", expected)\n\t}\n}\n\n// Simple tests for numeric encode/decode primitives (varint, etc.)\nfunc TestNumericPrimitives(t *testing.T) {\n\tfor i := uint64(0); i < 1e6; i += 111 {\n\t\to := old()\n\t\tif o.EncodeVarint(i) != nil {\n\t\t\tt.Error(\"EncodeVarint\")\n\t\t\tbreak\n\t\t}\n\t\tx, e := o.DecodeVarint()\n\t\tif e != nil {\n\t\t\tt.Fatal(\"DecodeVarint\")\n\t\t}\n\t\tif x != i {\n\t\t\tt.Fatal(\"varint decode fail:\", i, x)\n\t\t}\n\n\t\to = old()\n\t\tif o.EncodeFixed32(i) != nil {\n\t\t\tt.Fatal(\"encFixed32\")\n\t\t}\n\t\tx, e = o.DecodeFixed32()\n\t\tif e != nil {\n\t\t\tt.Fatal(\"decFixed32\")\n\t\t}\n\t\tif x != i {\n\t\t\tt.Fatal(\"fixed32 decode fail:\", i, x)\n\t\t}\n\n\t\to = old()\n\t\tif o.EncodeFixed64(i*1234567) != nil {\n\t\t\tt.Error(\"encFixed64\")\n\t\t\tbreak\n\t\t}\n\t\tx, e = o.DecodeFixed64()\n\t\tif e != nil {\n\t\t\tt.Error(\"decFixed64\")\n\t\t\tbreak\n\t\t}\n\t\tif x != i*1234567 {\n\t\t\tt.Error(\"fixed64 decode fail:\", i*1234567, x)\n\t\t\tbreak\n\t\t}\n\n\t\to = old()\n\t\ti32 := int32(i - 12345)\n\t\tif o.EncodeZigzag32(uint64(i32)) != nil {\n\t\t\tt.Fatal(\"EncodeZigzag32\")\n\t\t}\n\t\tx, e = o.DecodeZigzag32()\n\t\tif e != nil {\n\t\t\tt.Fatal(\"DecodeZigzag32\")\n\t\t}\n\t\tif x != uint64(uint32(i32)) {\n\t\t\tt.Fatal(\"zigzag32 decode fail:\", i32, x)\n\t\t}\n\n\t\to = old()\n\t\ti64 := int64(i - 12345)\n\t\tif o.EncodeZigzag64(uint64(i64)) != nil {\n\t\t\tt.Fatal(\"EncodeZigzag64\")\n\t\t}\n\t\tx, e = o.DecodeZigzag64()\n\t\tif e != nil {\n\t\t\tt.Fatal(\"DecodeZigzag64\")\n\t\t}\n\t\tif x != uint64(i64) {\n\t\t\tt.Fatal(\"zigzag64 decode fail:\", i64, x)\n\t\t}\n\t}\n}\n\n// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces.\ntype fakeMarshaler struct {\n\tb   []byte\n\terr 
error\n}\n\nfunc (f fakeMarshaler) Marshal() ([]byte, error) {\n\treturn f.b, f.err\n}\n\nfunc (f fakeMarshaler) String() string {\n\treturn fmt.Sprintf(\"Bytes: %v Error: %v\", f.b, f.err)\n}\n\nfunc (f fakeMarshaler) ProtoMessage() {}\n\nfunc (f fakeMarshaler) Reset() {}\n\n// Simple tests for proto messages that implement the Marshaler interface.\nfunc TestMarshalerEncoding(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tm       Message\n\t\twant    []byte\n\t\twantErr error\n\t}{\n\t\t{\n\t\t\tname: \"Marshaler that fails\",\n\t\t\tm: fakeMarshaler{\n\t\t\t\terr: errors.New(\"some marshal err\"),\n\t\t\t\tb:   []byte{5, 6, 7},\n\t\t\t},\n\t\t\t// Since there's an error, nothing should be written to buffer.\n\t\t\twant:    nil,\n\t\t\twantErr: errors.New(\"some marshal err\"),\n\t\t},\n\t\t{\n\t\t\tname: \"Marshaler that succeeds\",\n\t\t\tm: fakeMarshaler{\n\t\t\t\tb: []byte{0, 1, 2, 3, 4, 127, 255},\n\t\t\t},\n\t\t\twant:    []byte{0, 1, 2, 3, 4, 127, 255},\n\t\t\twantErr: nil,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tb := NewBuffer(nil)\n\t\terr := b.Marshal(test.m)\n\t\tif !reflect.DeepEqual(test.wantErr, err) {\n\t\t\tt.Errorf(\"%s: got err %v wanted %v\", test.name, err, test.wantErr)\n\t\t}\n\t\tif !reflect.DeepEqual(test.want, b.Bytes()) {\n\t\t\tt.Errorf(\"%s: got bytes %v wanted %v\", test.name, b.Bytes(), test.want)\n\t\t}\n\t}\n}\n\n// Simple tests for bytes\nfunc TestBytesPrimitives(t *testing.T) {\n\to := old()\n\tbytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'}\n\tif o.EncodeRawBytes(bytes) != nil {\n\t\tt.Error(\"EncodeRawBytes\")\n\t}\n\tdecb, e := o.DecodeRawBytes(false)\n\tif e != nil {\n\t\tt.Error(\"DecodeRawBytes\")\n\t}\n\tequalbytes(bytes, decb, t)\n}\n\n// Simple tests for strings\nfunc TestStringPrimitives(t *testing.T) {\n\to := old()\n\ts := \"now is the time\"\n\tif o.EncodeStringBytes(s) != nil {\n\t\tt.Error(\"enc_string\")\n\t}\n\tdecs, e := 
o.DecodeStringBytes()\n\tif e != nil {\n\t\tt.Error(\"dec_string\")\n\t}\n\tif s != decs {\n\t\tt.Error(\"string encode/decode fail:\", s, decs)\n\t}\n}\n\n// Do we catch the \"required bit not set\" case?\nfunc TestRequiredBit(t *testing.T) {\n\to := old()\n\tpb := new(GoTest)\n\terr := o.Marshal(pb)\n\tif err == nil {\n\t\tt.Error(\"did not catch missing required fields\")\n\t} else if strings.Index(err.Error(), \"Kind\") < 0 {\n\t\tt.Error(\"wrong error type:\", err)\n\t}\n}\n\n// Check that all fields are nil.\n// Clearly silly, and a residue from a more interesting test with an earlier,\n// different initialization property, but it once caught a compiler bug so\n// it lives.\nfunc checkInitialized(pb *GoTest, t *testing.T) {\n\tif pb.F_BoolDefaulted != nil {\n\t\tt.Error(\"New or Reset did not set boolean:\", *pb.F_BoolDefaulted)\n\t}\n\tif pb.F_Int32Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set int32:\", *pb.F_Int32Defaulted)\n\t}\n\tif pb.F_Int64Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set int64:\", *pb.F_Int64Defaulted)\n\t}\n\tif pb.F_Fixed32Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set fixed32:\", *pb.F_Fixed32Defaulted)\n\t}\n\tif pb.F_Fixed64Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set fixed64:\", *pb.F_Fixed64Defaulted)\n\t}\n\tif pb.F_Uint32Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set uint32:\", *pb.F_Uint32Defaulted)\n\t}\n\tif pb.F_Uint64Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set uint64:\", *pb.F_Uint64Defaulted)\n\t}\n\tif pb.F_FloatDefaulted != nil {\n\t\tt.Error(\"New or Reset did not set float:\", *pb.F_FloatDefaulted)\n\t}\n\tif pb.F_DoubleDefaulted != nil {\n\t\tt.Error(\"New or Reset did not set double:\", *pb.F_DoubleDefaulted)\n\t}\n\tif pb.F_StringDefaulted != nil {\n\t\tt.Error(\"New or Reset did not set string:\", *pb.F_StringDefaulted)\n\t}\n\tif pb.F_BytesDefaulted != nil {\n\t\tt.Error(\"New or Reset did not set bytes:\", 
string(pb.F_BytesDefaulted))\n\t}\n\tif pb.F_Sint32Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set int32:\", *pb.F_Sint32Defaulted)\n\t}\n\tif pb.F_Sint64Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set int64:\", *pb.F_Sint64Defaulted)\n\t}\n}\n\n// Does Reset() reset?\nfunc TestReset(t *testing.T) {\n\tpb := initGoTest(true)\n\t// muck with some values\n\tpb.F_BoolDefaulted = Bool(false)\n\tpb.F_Int32Defaulted = Int32(237)\n\tpb.F_Int64Defaulted = Int64(12346)\n\tpb.F_Fixed32Defaulted = Uint32(32000)\n\tpb.F_Fixed64Defaulted = Uint64(666)\n\tpb.F_Uint32Defaulted = Uint32(323232)\n\tpb.F_Uint64Defaulted = nil\n\tpb.F_FloatDefaulted = nil\n\tpb.F_DoubleDefaulted = Float64(0)\n\tpb.F_StringDefaulted = String(\"gotcha\")\n\tpb.F_BytesDefaulted = []byte(\"asdfasdf\")\n\tpb.F_Sint32Defaulted = Int32(123)\n\tpb.F_Sint64Defaulted = Int64(789)\n\tpb.Reset()\n\tcheckInitialized(pb, t)\n}\n\n// All required fields set, no defaults provided.\nfunc TestEncodeDecode1(t *testing.T) {\n\tpb := initGoTest(false)\n\toverify(t, pb,\n\t\t\"0807\"+ // field 1, encoding 0, value 7\n\t\t\t\"220d\"+\"0a056c6162656c120474797065\"+ // field 4, encoding 2 (GoTestField)\n\t\t\t\"5001\"+ // field 10, encoding 0, value 1\n\t\t\t\"5803\"+ // field 11, encoding 0, value 3\n\t\t\t\"6006\"+ // field 12, encoding 0, value 6\n\t\t\t\"6d20000000\"+ // field 13, encoding 5, value 0x20\n\t\t\t\"714000000000000000\"+ // field 14, encoding 1, value 0x40\n\t\t\t\"78a019\"+ // field 15, encoding 0, value 0xca0 = 3232\n\t\t\t\"8001c032\"+ // field 16, encoding 0, value 0x1940 = 6464\n\t\t\t\"8d0100004a45\"+ // field 17, encoding 5, value 3232.0\n\t\t\t\"9101000000000040b940\"+ // field 18, encoding 1, value 6464.0\n\t\t\t\"9a0106\"+\"737472696e67\"+ // field 19, encoding 2, string \"string\"\n\t\t\t\"b304\"+ // field 70, encoding 3, start group\n\t\t\t\"ba0408\"+\"7265717569726564\"+ // field 71, encoding 2, string \"required\"\n\t\t\t\"b404\"+ // field 70, encoding 4, end 
group\n\t\t\t\"aa0605\"+\"6279746573\"+ // field 101, encoding 2, string \"bytes\"\n\t\t\t\"b0063f\"+ // field 102, encoding 0, 0x3f zigzag32\n\t\t\t\"b8067f\") // field 103, encoding 0, 0x7f zigzag64\n}\n\n// All required fields set, defaults provided.\nfunc TestEncodeDecode2(t *testing.T) {\n\tpb := initGoTest(true)\n\toverify(t, pb,\n\t\t\"0807\"+ // field 1, encoding 0, value 7\n\t\t\t\"220d\"+\"0a056c6162656c120474797065\"+ // field 4, encoding 2 (GoTestField)\n\t\t\t\"5001\"+ // field 10, encoding 0, value 1\n\t\t\t\"5803\"+ // field 11, encoding 0, value 3\n\t\t\t\"6006\"+ // field 12, encoding 0, value 6\n\t\t\t\"6d20000000\"+ // field 13, encoding 5, value 32\n\t\t\t\"714000000000000000\"+ // field 14, encoding 1, value 64\n\t\t\t\"78a019\"+ // field 15, encoding 0, value 3232\n\t\t\t\"8001c032\"+ // field 16, encoding 0, value 6464\n\t\t\t\"8d0100004a45\"+ // field 17, encoding 5, value 3232.0\n\t\t\t\"9101000000000040b940\"+ // field 18, encoding 1, value 6464.0\n\t\t\t\"9a0106\"+\"737472696e67\"+ // field 19, encoding 2 string \"string\"\n\t\t\t\"c00201\"+ // field 40, encoding 0, value 1\n\t\t\t\"c80220\"+ // field 41, encoding 0, value 32\n\t\t\t\"d00240\"+ // field 42, encoding 0, value 64\n\t\t\t\"dd0240010000\"+ // field 43, encoding 5, value 320\n\t\t\t\"e1028002000000000000\"+ // field 44, encoding 1, value 640\n\t\t\t\"e8028019\"+ // field 45, encoding 0, value 3200\n\t\t\t\"f0028032\"+ // field 46, encoding 0, value 6400\n\t\t\t\"fd02e0659948\"+ // field 47, encoding 5, value 314159.0\n\t\t\t\"81030000000050971041\"+ // field 48, encoding 1, value 271828.0\n\t\t\t\"8a0310\"+\"68656c6c6f2c2022776f726c6421220a\"+ // field 49, encoding 2 string \"hello, \\\"world!\\\"\\n\"\n\t\t\t\"b304\"+ // start group field 70 level 1\n\t\t\t\"ba0408\"+\"7265717569726564\"+ // field 71, encoding 2, string \"required\"\n\t\t\t\"b404\"+ // end group field 70 level 1\n\t\t\t\"aa0605\"+\"6279746573\"+ // field 101, encoding 2 string \"bytes\"\n\t\t\t\"b0063f\"+ // 
field 102, encoding 0, 0x3f zigzag32\n\t\t\t\"b8067f\"+ // field 103, encoding 0, 0x7f zigzag64\n\t\t\t\"8a1907\"+\"4269676e6f7365\"+ // field 401, encoding 2, string \"Bignose\"\n\t\t\t\"90193f\"+ // field 402, encoding 0, value 63\n\t\t\t\"98197f\") // field 403, encoding 0, value 127\n\n}\n\n// All default fields set to their default value by hand\nfunc TestEncodeDecode3(t *testing.T) {\n\tpb := initGoTest(false)\n\tpb.F_BoolDefaulted = Bool(true)\n\tpb.F_Int32Defaulted = Int32(32)\n\tpb.F_Int64Defaulted = Int64(64)\n\tpb.F_Fixed32Defaulted = Uint32(320)\n\tpb.F_Fixed64Defaulted = Uint64(640)\n\tpb.F_Uint32Defaulted = Uint32(3200)\n\tpb.F_Uint64Defaulted = Uint64(6400)\n\tpb.F_FloatDefaulted = Float32(314159)\n\tpb.F_DoubleDefaulted = Float64(271828)\n\tpb.F_StringDefaulted = String(\"hello, \\\"world!\\\"\\n\")\n\tpb.F_BytesDefaulted = []byte(\"Bignose\")\n\tpb.F_Sint32Defaulted = Int32(-32)\n\tpb.F_Sint64Defaulted = Int64(-64)\n\n\toverify(t, pb,\n\t\t\"0807\"+ // field 1, encoding 0, value 7\n\t\t\t\"220d\"+\"0a056c6162656c120474797065\"+ // field 4, encoding 2 (GoTestField)\n\t\t\t\"5001\"+ // field 10, encoding 0, value 1\n\t\t\t\"5803\"+ // field 11, encoding 0, value 3\n\t\t\t\"6006\"+ // field 12, encoding 0, value 6\n\t\t\t\"6d20000000\"+ // field 13, encoding 5, value 32\n\t\t\t\"714000000000000000\"+ // field 14, encoding 1, value 64\n\t\t\t\"78a019\"+ // field 15, encoding 0, value 3232\n\t\t\t\"8001c032\"+ // field 16, encoding 0, value 6464\n\t\t\t\"8d0100004a45\"+ // field 17, encoding 5, value 3232.0\n\t\t\t\"9101000000000040b940\"+ // field 18, encoding 1, value 6464.0\n\t\t\t\"9a0106\"+\"737472696e67\"+ // field 19, encoding 2 string \"string\"\n\t\t\t\"c00201\"+ // field 40, encoding 0, value 1\n\t\t\t\"c80220\"+ // field 41, encoding 0, value 32\n\t\t\t\"d00240\"+ // field 42, encoding 0, value 64\n\t\t\t\"dd0240010000\"+ // field 43, encoding 5, value 320\n\t\t\t\"e1028002000000000000\"+ // field 44, encoding 1, value 
640\n\t\t\t\"e8028019\"+ // field 45, encoding 0, value 3200\n\t\t\t\"f0028032\"+ // field 46, encoding 0, value 6400\n\t\t\t\"fd02e0659948\"+ // field 47, encoding 5, value 314159.0\n\t\t\t\"81030000000050971041\"+ // field 48, encoding 1, value 271828.0\n\t\t\t\"8a0310\"+\"68656c6c6f2c2022776f726c6421220a\"+ // field 49, encoding 2 string \"hello, \\\"world!\\\"\\n\"\n\t\t\t\"b304\"+ // start group field 70 level 1\n\t\t\t\"ba0408\"+\"7265717569726564\"+ // field 71, encoding 2, string \"required\"\n\t\t\t\"b404\"+ // end group field 70 level 1\n\t\t\t\"aa0605\"+\"6279746573\"+ // field 101, encoding 2 string \"bytes\"\n\t\t\t\"b0063f\"+ // field 102, encoding 0, 0x3f zigzag32\n\t\t\t\"b8067f\"+ // field 103, encoding 0, 0x7f zigzag64\n\t\t\t\"8a1907\"+\"4269676e6f7365\"+ // field 401, encoding 2, string \"Bignose\"\n\t\t\t\"90193f\"+ // field 402, encoding 0, value 63\n\t\t\t\"98197f\") // field 403, encoding 0, value 127\n\n}\n\n// All required fields set, defaults provided, all non-defaulted optional fields have values.\nfunc TestEncodeDecode4(t *testing.T) {\n\tpb := initGoTest(true)\n\tpb.Table = String(\"hello\")\n\tpb.Param = Int32(7)\n\tpb.OptionalField = initGoTestField()\n\tpb.F_BoolOptional = Bool(true)\n\tpb.F_Int32Optional = Int32(32)\n\tpb.F_Int64Optional = Int64(64)\n\tpb.F_Fixed32Optional = Uint32(3232)\n\tpb.F_Fixed64Optional = Uint64(6464)\n\tpb.F_Uint32Optional = Uint32(323232)\n\tpb.F_Uint64Optional = Uint64(646464)\n\tpb.F_FloatOptional = Float32(32.)\n\tpb.F_DoubleOptional = Float64(64.)\n\tpb.F_StringOptional = String(\"hello\")\n\tpb.F_BytesOptional = []byte(\"Bignose\")\n\tpb.F_Sint32Optional = Int32(-32)\n\tpb.F_Sint64Optional = Int64(-64)\n\tpb.Optionalgroup = initGoTest_OptionalGroup()\n\n\toverify(t, pb,\n\t\t\"0807\"+ // field 1, encoding 0, value 7\n\t\t\t\"1205\"+\"68656c6c6f\"+ // field 2, encoding 2, string \"hello\"\n\t\t\t\"1807\"+ // field 3, encoding 0, value 7\n\t\t\t\"220d\"+\"0a056c6162656c120474797065\"+ // field 4, 
encoding 2 (GoTestField)\n\t\t\t\"320d\"+\"0a056c6162656c120474797065\"+ // field 6, encoding 2 (GoTestField)\n\t\t\t\"5001\"+ // field 10, encoding 0, value 1\n\t\t\t\"5803\"+ // field 11, encoding 0, value 3\n\t\t\t\"6006\"+ // field 12, encoding 0, value 6\n\t\t\t\"6d20000000\"+ // field 13, encoding 5, value 32\n\t\t\t\"714000000000000000\"+ // field 14, encoding 1, value 64\n\t\t\t\"78a019\"+ // field 15, encoding 0, value 3232\n\t\t\t\"8001c032\"+ // field 16, encoding 0, value 6464\n\t\t\t\"8d0100004a45\"+ // field 17, encoding 5, value 3232.0\n\t\t\t\"9101000000000040b940\"+ // field 18, encoding 1, value 6464.0\n\t\t\t\"9a0106\"+\"737472696e67\"+ // field 19, encoding 2 string \"string\"\n\t\t\t\"f00101\"+ // field 30, encoding 0, value 1\n\t\t\t\"f80120\"+ // field 31, encoding 0, value 32\n\t\t\t\"800240\"+ // field 32, encoding 0, value 64\n\t\t\t\"8d02a00c0000\"+ // field 33, encoding 5, value 3232\n\t\t\t\"91024019000000000000\"+ // field 34, encoding 1, value 6464\n\t\t\t\"9802a0dd13\"+ // field 35, encoding 0, value 323232\n\t\t\t\"a002c0ba27\"+ // field 36, encoding 0, value 646464\n\t\t\t\"ad0200000042\"+ // field 37, encoding 5, value 32.0\n\t\t\t\"b1020000000000005040\"+ // field 38, encoding 1, value 64.0\n\t\t\t\"ba0205\"+\"68656c6c6f\"+ // field 39, encoding 2, string \"hello\"\n\t\t\t\"c00201\"+ // field 40, encoding 0, value 1\n\t\t\t\"c80220\"+ // field 41, encoding 0, value 32\n\t\t\t\"d00240\"+ // field 42, encoding 0, value 64\n\t\t\t\"dd0240010000\"+ // field 43, encoding 5, value 320\n\t\t\t\"e1028002000000000000\"+ // field 44, encoding 1, value 640\n\t\t\t\"e8028019\"+ // field 45, encoding 0, value 3200\n\t\t\t\"f0028032\"+ // field 46, encoding 0, value 6400\n\t\t\t\"fd02e0659948\"+ // field 47, encoding 5, value 314159.0\n\t\t\t\"81030000000050971041\"+ // field 48, encoding 1, value 271828.0\n\t\t\t\"8a0310\"+\"68656c6c6f2c2022776f726c6421220a\"+ // field 49, encoding 2 string \"hello, \\\"world!\\\"\\n\"\n\t\t\t\"b304\"+ // 
start group field 70 level 1\n\t\t\t\"ba0408\"+\"7265717569726564\"+ // field 71, encoding 2, string \"required\"\n\t\t\t\"b404\"+ // end group field 70 level 1\n\t\t\t\"d305\"+ // start group field 90 level 1\n\t\t\t\"da0508\"+\"6f7074696f6e616c\"+ // field 91, encoding 2, string \"optional\"\n\t\t\t\"d405\"+ // end group field 90 level 1\n\t\t\t\"aa0605\"+\"6279746573\"+ // field 101, encoding 2 string \"bytes\"\n\t\t\t\"b0063f\"+ // field 102, encoding 0, 0x3f zigzag32\n\t\t\t\"b8067f\"+ // field 103, encoding 0, 0x7f zigzag64\n\t\t\t\"ea1207\"+\"4269676e6f7365\"+ // field 301, encoding 2, string \"Bignose\"\n\t\t\t\"f0123f\"+ // field 302, encoding 0, value 63\n\t\t\t\"f8127f\"+ // field 303, encoding 0, value 127\n\t\t\t\"8a1907\"+\"4269676e6f7365\"+ // field 401, encoding 2, string \"Bignose\"\n\t\t\t\"90193f\"+ // field 402, encoding 0, value 63\n\t\t\t\"98197f\") // field 403, encoding 0, value 127\n\n}\n\n// All required fields set, defaults provided, all repeated fields given two values.\nfunc TestEncodeDecode5(t *testing.T) {\n\tpb := initGoTest(true)\n\tpb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()}\n\tpb.F_BoolRepeated = []bool{false, true}\n\tpb.F_Int32Repeated = []int32{32, 33}\n\tpb.F_Int64Repeated = []int64{64, 65}\n\tpb.F_Fixed32Repeated = []uint32{3232, 3333}\n\tpb.F_Fixed64Repeated = []uint64{6464, 6565}\n\tpb.F_Uint32Repeated = []uint32{323232, 333333}\n\tpb.F_Uint64Repeated = []uint64{646464, 656565}\n\tpb.F_FloatRepeated = []float32{32., 33.}\n\tpb.F_DoubleRepeated = []float64{64., 65.}\n\tpb.F_StringRepeated = []string{\"hello\", \"sailor\"}\n\tpb.F_BytesRepeated = [][]byte{[]byte(\"big\"), []byte(\"nose\")}\n\tpb.F_Sint32Repeated = []int32{32, -32}\n\tpb.F_Sint64Repeated = []int64{64, -64}\n\tpb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()}\n\n\toverify(t, pb,\n\t\t\"0807\"+ // field 1, encoding 0, value 7\n\t\t\t\"220d\"+\"0a056c6162656c120474797065\"+ // field 4, 
encoding 2 (GoTestField)\n\t\t\t\"2a0d\"+\"0a056c6162656c120474797065\"+ // field 5, encoding 2 (GoTestField)\n\t\t\t\"2a0d\"+\"0a056c6162656c120474797065\"+ // field 5, encoding 2 (GoTestField)\n\t\t\t\"5001\"+ // field 10, encoding 0, value 1\n\t\t\t\"5803\"+ // field 11, encoding 0, value 3\n\t\t\t\"6006\"+ // field 12, encoding 0, value 6\n\t\t\t\"6d20000000\"+ // field 13, encoding 5, value 32\n\t\t\t\"714000000000000000\"+ // field 14, encoding 1, value 64\n\t\t\t\"78a019\"+ // field 15, encoding 0, value 3232\n\t\t\t\"8001c032\"+ // field 16, encoding 0, value 6464\n\t\t\t\"8d0100004a45\"+ // field 17, encoding 5, value 3232.0\n\t\t\t\"9101000000000040b940\"+ // field 18, encoding 1, value 6464.0\n\t\t\t\"9a0106\"+\"737472696e67\"+ // field 19, encoding 2 string \"string\"\n\t\t\t\"a00100\"+ // field 20, encoding 0, value 0\n\t\t\t\"a00101\"+ // field 20, encoding 0, value 1\n\t\t\t\"a80120\"+ // field 21, encoding 0, value 32\n\t\t\t\"a80121\"+ // field 21, encoding 0, value 33\n\t\t\t\"b00140\"+ // field 22, encoding 0, value 64\n\t\t\t\"b00141\"+ // field 22, encoding 0, value 65\n\t\t\t\"bd01a00c0000\"+ // field 23, encoding 5, value 3232\n\t\t\t\"bd01050d0000\"+ // field 23, encoding 5, value 3333\n\t\t\t\"c1014019000000000000\"+ // field 24, encoding 1, value 6464\n\t\t\t\"c101a519000000000000\"+ // field 24, encoding 1, value 6565\n\t\t\t\"c801a0dd13\"+ // field 25, encoding 0, value 323232\n\t\t\t\"c80195ac14\"+ // field 25, encoding 0, value 333333\n\t\t\t\"d001c0ba27\"+ // field 26, encoding 0, value 646464\n\t\t\t\"d001b58928\"+ // field 26, encoding 0, value 656565\n\t\t\t\"dd0100000042\"+ // field 27, encoding 5, value 32.0\n\t\t\t\"dd0100000442\"+ // field 27, encoding 5, value 33.0\n\t\t\t\"e1010000000000005040\"+ // field 28, encoding 1, value 64.0\n\t\t\t\"e1010000000000405040\"+ // field 28, encoding 1, value 65.0\n\t\t\t\"ea0105\"+\"68656c6c6f\"+ // field 29, encoding 2, string \"hello\"\n\t\t\t\"ea0106\"+\"7361696c6f72\"+ // field 29, 
encoding 2, string \"sailor\"\n\t\t\t\"c00201\"+ // field 40, encoding 0, value 1\n\t\t\t\"c80220\"+ // field 41, encoding 0, value 32\n\t\t\t\"d00240\"+ // field 42, encoding 0, value 64\n\t\t\t\"dd0240010000\"+ // field 43, encoding 5, value 320\n\t\t\t\"e1028002000000000000\"+ // field 44, encoding 1, value 640\n\t\t\t\"e8028019\"+ // field 45, encoding 0, value 3200\n\t\t\t\"f0028032\"+ // field 46, encoding 0, value 6400\n\t\t\t\"fd02e0659948\"+ // field 47, encoding 5, value 314159.0\n\t\t\t\"81030000000050971041\"+ // field 48, encoding 1, value 271828.0\n\t\t\t\"8a0310\"+\"68656c6c6f2c2022776f726c6421220a\"+ // field 49, encoding 2 string \"hello, \\\"world!\\\"\\n\"\n\t\t\t\"b304\"+ // start group field 70 level 1\n\t\t\t\"ba0408\"+\"7265717569726564\"+ // field 71, encoding 2, string \"required\"\n\t\t\t\"b404\"+ // end group field 70 level 1\n\t\t\t\"8305\"+ // start group field 80 level 1\n\t\t\t\"8a0508\"+\"7265706561746564\"+ // field 81, encoding 2, string \"repeated\"\n\t\t\t\"8405\"+ // end group field 80 level 1\n\t\t\t\"8305\"+ // start group field 80 level 1\n\t\t\t\"8a0508\"+\"7265706561746564\"+ // field 81, encoding 2, string \"repeated\"\n\t\t\t\"8405\"+ // end group field 80 level 1\n\t\t\t\"aa0605\"+\"6279746573\"+ // field 101, encoding 2 string \"bytes\"\n\t\t\t\"b0063f\"+ // field 102, encoding 0, 0x3f zigzag32\n\t\t\t\"b8067f\"+ // field 103, encoding 0, 0x7f zigzag64\n\t\t\t\"ca0c03\"+\"626967\"+ // field 201, encoding 2, string \"big\"\n\t\t\t\"ca0c04\"+\"6e6f7365\"+ // field 201, encoding 2, string \"nose\"\n\t\t\t\"d00c40\"+ // field 202, encoding 0, value 32\n\t\t\t\"d00c3f\"+ // field 202, encoding 0, value -32\n\t\t\t\"d80c8001\"+ // field 203, encoding 0, value 64\n\t\t\t\"d80c7f\"+ // field 203, encoding 0, value -64\n\t\t\t\"8a1907\"+\"4269676e6f7365\"+ // field 401, encoding 2, string \"Bignose\"\n\t\t\t\"90193f\"+ // field 402, encoding 0, value 63\n\t\t\t\"98197f\") // field 403, encoding 0, value 127\n\n}\n\n// All 
required fields set, all packed repeated fields given two values.\nfunc TestEncodeDecode6(t *testing.T) {\n\tpb := initGoTest(false)\n\tpb.F_BoolRepeatedPacked = []bool{false, true}\n\tpb.F_Int32RepeatedPacked = []int32{32, 33}\n\tpb.F_Int64RepeatedPacked = []int64{64, 65}\n\tpb.F_Fixed32RepeatedPacked = []uint32{3232, 3333}\n\tpb.F_Fixed64RepeatedPacked = []uint64{6464, 6565}\n\tpb.F_Uint32RepeatedPacked = []uint32{323232, 333333}\n\tpb.F_Uint64RepeatedPacked = []uint64{646464, 656565}\n\tpb.F_FloatRepeatedPacked = []float32{32., 33.}\n\tpb.F_DoubleRepeatedPacked = []float64{64., 65.}\n\tpb.F_Sint32RepeatedPacked = []int32{32, -32}\n\tpb.F_Sint64RepeatedPacked = []int64{64, -64}\n\n\toverify(t, pb,\n\t\t\"0807\"+ // field 1, encoding 0, value 7\n\t\t\t\"220d\"+\"0a056c6162656c120474797065\"+ // field 4, encoding 2 (GoTestField)\n\t\t\t\"5001\"+ // field 10, encoding 0, value 1\n\t\t\t\"5803\"+ // field 11, encoding 0, value 3\n\t\t\t\"6006\"+ // field 12, encoding 0, value 6\n\t\t\t\"6d20000000\"+ // field 13, encoding 5, value 32\n\t\t\t\"714000000000000000\"+ // field 14, encoding 1, value 64\n\t\t\t\"78a019\"+ // field 15, encoding 0, value 3232\n\t\t\t\"8001c032\"+ // field 16, encoding 0, value 6464\n\t\t\t\"8d0100004a45\"+ // field 17, encoding 5, value 3232.0\n\t\t\t\"9101000000000040b940\"+ // field 18, encoding 1, value 6464.0\n\t\t\t\"9a0106\"+\"737472696e67\"+ // field 19, encoding 2 string \"string\"\n\t\t\t\"9203020001\"+ // field 50, encoding 2, 2 bytes, value 0, value 1\n\t\t\t\"9a03022021\"+ // field 51, encoding 2, 2 bytes, value 32, value 33\n\t\t\t\"a203024041\"+ // field 52, encoding 2, 2 bytes, value 64, value 65\n\t\t\t\"aa0308\"+ // field 53, encoding 2, 8 bytes\n\t\t\t\"a00c0000050d0000\"+ // value 3232, value 3333\n\t\t\t\"b20310\"+ // field 54, encoding 2, 16 bytes\n\t\t\t\"4019000000000000a519000000000000\"+ // value 6464, value 6565\n\t\t\t\"ba0306\"+ // field 55, encoding 2, 6 bytes\n\t\t\t\"a0dd1395ac14\"+ // value 323232, value 
333333\n\t\t\t\"c20306\"+ // field 56, encoding 2, 6 bytes\n\t\t\t\"c0ba27b58928\"+ // value 646464, value 656565\n\t\t\t\"ca0308\"+ // field 57, encoding 2, 8 bytes\n\t\t\t\"0000004200000442\"+ // value 32.0, value 33.0\n\t\t\t\"d20310\"+ // field 58, encoding 2, 16 bytes\n\t\t\t\"00000000000050400000000000405040\"+ // value 64.0, value 65.0\n\t\t\t\"b304\"+ // start group field 70 level 1\n\t\t\t\"ba0408\"+\"7265717569726564\"+ // field 71, encoding 2, string \"required\"\n\t\t\t\"b404\"+ // end group field 70 level 1\n\t\t\t\"aa0605\"+\"6279746573\"+ // field 101, encoding 2 string \"bytes\"\n\t\t\t\"b0063f\"+ // field 102, encoding 0, 0x3f zigzag32\n\t\t\t\"b8067f\"+ // field 103, encoding 0, 0x7f zigzag64\n\t\t\t\"b21f02\"+ // field 502, encoding 2, 2 bytes\n\t\t\t\"403f\"+ // value 32, value -32\n\t\t\t\"ba1f03\"+ // field 503, encoding 2, 3 bytes\n\t\t\t\"80017f\") // value 64, value -64\n}\n\n// Test that we can encode empty bytes fields.\nfunc TestEncodeDecodeBytes1(t *testing.T) {\n\tpb := initGoTest(false)\n\n\t// Create our bytes\n\tpb.F_BytesRequired = []byte{}\n\tpb.F_BytesRepeated = [][]byte{{}}\n\tpb.F_BytesOptional = []byte{}\n\n\td, err := Marshal(pb)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tpbd := new(GoTest)\n\tif err := Unmarshal(d, pbd); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 {\n\t\tt.Error(\"required empty bytes field is incorrect\")\n\t}\n\tif pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil {\n\t\tt.Error(\"repeated empty bytes field is incorrect\")\n\t}\n\tif pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 {\n\t\tt.Error(\"optional empty bytes field is incorrect\")\n\t}\n}\n\n// Test that we encode nil-valued fields of a repeated bytes field correctly.\n// Since entries in a repeated field cannot be nil, nil must mean empty value.\nfunc TestEncodeDecodeBytes2(t *testing.T) {\n\tpb := initGoTest(false)\n\n\t// 
Create our bytes\n\tpb.F_BytesRepeated = [][]byte{nil}\n\n\td, err := Marshal(pb)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tpbd := new(GoTest)\n\tif err := Unmarshal(d, pbd); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil {\n\t\tt.Error(\"Unexpected value for repeated bytes field\")\n\t}\n}\n\n// All required fields set, defaults provided, all repeated fields given two values.\nfunc TestSkippingUnrecognizedFields(t *testing.T) {\n\to := old()\n\tpb := initGoTestField()\n\n\t// Marshal it normally.\n\to.Marshal(pb)\n\n\t// Now new a GoSkipTest record.\n\tskip := &GoSkipTest{\n\t\tSkipInt32:   Int32(32),\n\t\tSkipFixed32: Uint32(3232),\n\t\tSkipFixed64: Uint64(6464),\n\t\tSkipString:  String(\"skipper\"),\n\t\tSkipgroup: &GoSkipTest_SkipGroup{\n\t\t\tGroupInt32:  Int32(75),\n\t\t\tGroupString: String(\"wxyz\"),\n\t\t},\n\t}\n\n\t// Marshal it into same buffer.\n\to.Marshal(skip)\n\n\tpbd := new(GoTestField)\n\to.Unmarshal(pbd)\n\n\t// The __unrecognized field should be a marshaling of GoSkipTest\n\tskipd := new(GoSkipTest)\n\n\to.SetBuf(pbd.XXX_unrecognized)\n\to.Unmarshal(skipd)\n\n\tif *skipd.SkipInt32 != *skip.SkipInt32 {\n\t\tt.Error(\"skip int32\", skipd.SkipInt32)\n\t}\n\tif *skipd.SkipFixed32 != *skip.SkipFixed32 {\n\t\tt.Error(\"skip fixed32\", skipd.SkipFixed32)\n\t}\n\tif *skipd.SkipFixed64 != *skip.SkipFixed64 {\n\t\tt.Error(\"skip fixed64\", skipd.SkipFixed64)\n\t}\n\tif *skipd.SkipString != *skip.SkipString {\n\t\tt.Error(\"skip string\", *skipd.SkipString)\n\t}\n\tif *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 {\n\t\tt.Error(\"skip group int32\", skipd.Skipgroup.GroupInt32)\n\t}\n\tif *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString {\n\t\tt.Error(\"skip group string\", *skipd.Skipgroup.GroupString)\n\t}\n}\n\n// Check that unrecognized fields of a submessage are preserved.\nfunc TestSubmessageUnrecognizedFields(t *testing.T) {\n\tnm := &NewMessage{\n\t\tNested: 
&NewMessage_Nested{\n\t\t\tName:      String(\"Nigel\"),\n\t\t\tFoodGroup: String(\"carbs\"),\n\t\t},\n\t}\n\tb, err := Marshal(nm)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal of NewMessage: %v\", err)\n\t}\n\n\t// Unmarshal into an OldMessage.\n\tom := new(OldMessage)\n\tif err := Unmarshal(b, om); err != nil {\n\t\tt.Fatalf(\"Unmarshal to OldMessage: %v\", err)\n\t}\n\texp := &OldMessage{\n\t\tNested: &OldMessage_Nested{\n\t\t\tName: String(\"Nigel\"),\n\t\t\t// normal protocol buffer users should not do this\n\t\t\tXXX_unrecognized: []byte(\"\\x12\\x05carbs\"),\n\t\t},\n\t}\n\tif !Equal(om, exp) {\n\t\tt.Errorf(\"om = %v, want %v\", om, exp)\n\t}\n\n\t// Clone the OldMessage.\n\tom = Clone(om).(*OldMessage)\n\tif !Equal(om, exp) {\n\t\tt.Errorf(\"Clone(om) = %v, want %v\", om, exp)\n\t}\n\n\t// Marshal the OldMessage, then unmarshal it into an empty NewMessage.\n\tif b, err = Marshal(om); err != nil {\n\t\tt.Fatalf(\"Marshal of OldMessage: %v\", err)\n\t}\n\tt.Logf(\"Marshal(%v) -> %q\", om, b)\n\tnm2 := new(NewMessage)\n\tif err := Unmarshal(b, nm2); err != nil {\n\t\tt.Fatalf(\"Unmarshal to NewMessage: %v\", err)\n\t}\n\tif !Equal(nm, nm2) {\n\t\tt.Errorf(\"NewMessage round-trip: %v => %v\", nm, nm2)\n\t}\n}\n\n// Check that an int32 field can be upgraded to an int64 field.\nfunc TestNegativeInt32(t *testing.T) {\n\tom := &OldMessage{\n\t\tNum: Int32(-1),\n\t}\n\tb, err := Marshal(om)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal of OldMessage: %v\", err)\n\t}\n\n\t// Check the size. 
It should be 11 bytes;\n\t// 1 for the field/wire type, and 10 for the negative number.\n\tif len(b) != 11 {\n\t\tt.Errorf(\"%v marshaled as %q, wanted 11 bytes\", om, b)\n\t}\n\n\t// Unmarshal into a NewMessage.\n\tnm := new(NewMessage)\n\tif err := Unmarshal(b, nm); err != nil {\n\t\tt.Fatalf(\"Unmarshal to NewMessage: %v\", err)\n\t}\n\twant := &NewMessage{\n\t\tNum: Int64(-1),\n\t}\n\tif !Equal(nm, want) {\n\t\tt.Errorf(\"nm = %v, want %v\", nm, want)\n\t}\n}\n\n// Check that we can grow an array (repeated field) to have many elements.\n// This test doesn't depend only on our encoding; for variety, it makes sure\n// we create, encode, and decode the correct contents explicitly.  It's therefore\n// a bit messier.\n// This test also uses (and hence tests) the Marshal/Unmarshal functions\n// instead of the methods.\nfunc TestBigRepeated(t *testing.T) {\n\tpb := initGoTest(true)\n\n\t// Create the arrays\n\tconst N = 50 // Internally the library starts much smaller.\n\tpb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N)\n\tpb.F_Sint64Repeated = make([]int64, N)\n\tpb.F_Sint32Repeated = make([]int32, N)\n\tpb.F_BytesRepeated = make([][]byte, N)\n\tpb.F_StringRepeated = make([]string, N)\n\tpb.F_DoubleRepeated = make([]float64, N)\n\tpb.F_FloatRepeated = make([]float32, N)\n\tpb.F_Uint64Repeated = make([]uint64, N)\n\tpb.F_Uint32Repeated = make([]uint32, N)\n\tpb.F_Fixed64Repeated = make([]uint64, N)\n\tpb.F_Fixed32Repeated = make([]uint32, N)\n\tpb.F_Int64Repeated = make([]int64, N)\n\tpb.F_Int32Repeated = make([]int32, N)\n\tpb.F_BoolRepeated = make([]bool, N)\n\tpb.RepeatedField = make([]*GoTestField, N)\n\n\t// Fill in the arrays with checkable values.\n\tigtf := initGoTestField()\n\tigtrg := initGoTest_RepeatedGroup()\n\tfor i := 0; i < N; i++ {\n\t\tpb.Repeatedgroup[i] = igtrg\n\t\tpb.F_Sint64Repeated[i] = int64(i)\n\t\tpb.F_Sint32Repeated[i] = int32(i)\n\t\ts := fmt.Sprint(i)\n\t\tpb.F_BytesRepeated[i] = []byte(s)\n\t\tpb.F_StringRepeated[i] = 
s\n\t\tpb.F_DoubleRepeated[i] = float64(i)\n\t\tpb.F_FloatRepeated[i] = float32(i)\n\t\tpb.F_Uint64Repeated[i] = uint64(i)\n\t\tpb.F_Uint32Repeated[i] = uint32(i)\n\t\tpb.F_Fixed64Repeated[i] = uint64(i)\n\t\tpb.F_Fixed32Repeated[i] = uint32(i)\n\t\tpb.F_Int64Repeated[i] = int64(i)\n\t\tpb.F_Int32Repeated[i] = int32(i)\n\t\tpb.F_BoolRepeated[i] = i%2 == 0\n\t\tpb.RepeatedField[i] = igtf\n\t}\n\n\t// Marshal.\n\tbuf, _ := Marshal(pb)\n\n\t// Now test Unmarshal by recreating the original buffer.\n\tpbd := new(GoTest)\n\tUnmarshal(buf, pbd)\n\n\t// Check the checkable values\n\tfor i := uint64(0); i < N; i++ {\n\t\tif pbd.Repeatedgroup[i] == nil { // TODO: more checking?\n\t\t\tt.Error(\"pbd.Repeatedgroup bad\")\n\t\t}\n\t\tvar x uint64\n\t\tx = uint64(pbd.F_Sint64Repeated[i])\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_Sint64Repeated bad\", x, i)\n\t\t}\n\t\tx = uint64(pbd.F_Sint32Repeated[i])\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_Sint32Repeated bad\", x, i)\n\t\t}\n\t\ts := fmt.Sprint(i)\n\t\tequalbytes(pbd.F_BytesRepeated[i], []byte(s), t)\n\t\tif pbd.F_StringRepeated[i] != s {\n\t\t\tt.Error(\"pbd.F_Sint32Repeated bad\", pbd.F_StringRepeated[i], i)\n\t\t}\n\t\tx = uint64(pbd.F_DoubleRepeated[i])\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_DoubleRepeated bad\", x, i)\n\t\t}\n\t\tx = uint64(pbd.F_FloatRepeated[i])\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_FloatRepeated bad\", x, i)\n\t\t}\n\t\tx = pbd.F_Uint64Repeated[i]\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_Uint64Repeated bad\", x, i)\n\t\t}\n\t\tx = uint64(pbd.F_Uint32Repeated[i])\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_Uint32Repeated bad\", x, i)\n\t\t}\n\t\tx = pbd.F_Fixed64Repeated[i]\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_Fixed64Repeated bad\", x, i)\n\t\t}\n\t\tx = uint64(pbd.F_Fixed32Repeated[i])\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_Fixed32Repeated bad\", x, i)\n\t\t}\n\t\tx = uint64(pbd.F_Int64Repeated[i])\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_Int64Repeated bad\", x, i)\n\t\t}\n\t\tx = 
uint64(pbd.F_Int32Repeated[i])\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_Int32Repeated bad\", x, i)\n\t\t}\n\t\tif pbd.F_BoolRepeated[i] != (i%2 == 0) {\n\t\t\tt.Error(\"pbd.F_BoolRepeated bad\", x, i)\n\t\t}\n\t\tif pbd.RepeatedField[i] == nil { // TODO: more checking?\n\t\t\tt.Error(\"pbd.RepeatedField bad\")\n\t\t}\n\t}\n}\n\n// Verify we give a useful message when decoding to the wrong structure type.\nfunc TestTypeMismatch(t *testing.T) {\n\tpb1 := initGoTest(true)\n\n\t// Marshal\n\to := old()\n\to.Marshal(pb1)\n\n\t// Now Unmarshal it to the wrong type.\n\tpb2 := initGoTestField()\n\terr := o.Unmarshal(pb2)\n\tif err == nil {\n\t\tt.Error(\"expected error, got no error\")\n\t} else if !strings.Contains(err.Error(), \"bad wiretype\") {\n\t\tt.Error(\"expected bad wiretype error, got\", err)\n\t}\n}\n\nfunc encodeDecode(t *testing.T, in, out Message, msg string) {\n\tbuf, err := Marshal(in)\n\tif err != nil {\n\t\tt.Fatalf(\"failed marshaling %v: %v\", msg, err)\n\t}\n\tif err := Unmarshal(buf, out); err != nil {\n\t\tt.Fatalf(\"failed unmarshaling %v: %v\", msg, err)\n\t}\n}\n\nfunc TestPackedNonPackedDecoderSwitching(t *testing.T) {\n\tnp, p := new(NonPackedTest), new(PackedTest)\n\n\t// non-packed -> packed\n\tnp.A = []int32{0, 1, 1, 2, 3, 5}\n\tencodeDecode(t, np, p, \"non-packed -> packed\")\n\tif !reflect.DeepEqual(np.A, p.B) {\n\t\tt.Errorf(\"failed non-packed -> packed; np.A=%+v, p.B=%+v\", np.A, p.B)\n\t}\n\n\t// packed -> non-packed\n\tnp.Reset()\n\tp.B = []int32{3, 1, 4, 1, 5, 9}\n\tencodeDecode(t, p, np, \"packed -> non-packed\")\n\tif !reflect.DeepEqual(p.B, np.A) {\n\t\tt.Errorf(\"failed packed -> non-packed; p.B=%+v, np.A=%+v\", p.B, np.A)\n\t}\n}\n\nfunc TestProto1RepeatedGroup(t *testing.T) {\n\tpb := &MessageList{\n\t\tMessage: []*MessageList_Message{\n\t\t\t{\n\t\t\t\tName:  String(\"blah\"),\n\t\t\t\tCount: Int32(7),\n\t\t\t},\n\t\t\t// NOTE: pb.Message[1] is a nil\n\t\t\tnil,\n\t\t},\n\t}\n\n\to := old()\n\terr := o.Marshal(pb)\n\tif err == 
nil || !strings.Contains(err.Error(), \"repeated field Message has nil\") {\n\t\tt.Fatalf(\"unexpected or no error when marshaling: %v\", err)\n\t}\n}\n\n// Test that enums work.  Checks for a bug introduced by making enums\n// named types instead of int32: newInt32FromUint64 would crash with\n// a type mismatch in reflect.PointTo.\nfunc TestEnum(t *testing.T) {\n\tpb := new(GoEnum)\n\tpb.Foo = FOO_FOO1.Enum()\n\to := old()\n\tif err := o.Marshal(pb); err != nil {\n\t\tt.Fatal(\"error encoding enum:\", err)\n\t}\n\tpb1 := new(GoEnum)\n\tif err := o.Unmarshal(pb1); err != nil {\n\t\tt.Fatal(\"error decoding enum:\", err)\n\t}\n\tif *pb1.Foo != FOO_FOO1 {\n\t\tt.Error(\"expected 7 but got \", *pb1.Foo)\n\t}\n}\n\n// Enum types have String methods. Check that enum fields can be printed.\n// We don't care what the value actually is, just as long as it doesn't crash.\nfunc TestPrintingNilEnumFields(t *testing.T) {\n\tpb := new(GoEnum)\n\tfmt.Sprintf(\"%+v\", pb)\n}\n\n// Verify that absent required fields cause Marshal/Unmarshal to return errors.\nfunc TestRequiredFieldEnforcement(t *testing.T) {\n\tpb := new(GoTestField)\n\t_, err := Marshal(pb)\n\tif err == nil {\n\t\tt.Error(\"marshal: expected error, got nil\")\n\t} else if strings.Index(err.Error(), \"Label\") < 0 {\n\t\tt.Errorf(\"marshal: bad error type: %v\", err)\n\t}\n\n\t// A slightly sneaky, yet valid, proto. 
It encodes the same required field twice,\n\t// so simply counting the required fields is insufficient.\n\t// field 1, encoding 2, value \"hi\"\n\tbuf := []byte(\"\\x0A\\x02hi\\x0A\\x02hi\")\n\terr = Unmarshal(buf, pb)\n\tif err == nil {\n\t\tt.Error(\"unmarshal: expected error, got nil\")\n\t} else if strings.Index(err.Error(), \"{Unknown}\") < 0 {\n\t\tt.Errorf(\"unmarshal: bad error type: %v\", err)\n\t}\n}\n\nfunc TestTypedNilMarshal(t *testing.T) {\n\t// A typed nil should return ErrNil and not crash.\n\t_, err := Marshal((*GoEnum)(nil))\n\tif err != ErrNil {\n\t\tt.Errorf(\"Marshal: got err %v, want ErrNil\", err)\n\t}\n}\n\n// A type that implements the Marshaler interface, but is not nillable.\ntype nonNillableInt uint64\n\nfunc (nni nonNillableInt) Marshal() ([]byte, error) {\n\treturn EncodeVarint(uint64(nni)), nil\n}\n\ntype NNIMessage struct {\n\tnni nonNillableInt\n}\n\nfunc (*NNIMessage) Reset()         {}\nfunc (*NNIMessage) String() string { return \"\" }\nfunc (*NNIMessage) ProtoMessage()  {}\n\n// A type that implements the Marshaler interface and is nillable.\ntype nillableMessage struct {\n\tx uint64\n}\n\nfunc (nm *nillableMessage) Marshal() ([]byte, error) {\n\treturn EncodeVarint(nm.x), nil\n}\n\ntype NMMessage struct {\n\tnm *nillableMessage\n}\n\nfunc (*NMMessage) Reset()         {}\nfunc (*NMMessage) String() string { return \"\" }\nfunc (*NMMessage) ProtoMessage()  {}\n\n// Verify a type that uses the Marshaler interface, but has a nil pointer.\nfunc TestNilMarshaler(t *testing.T) {\n\t// Try a struct with a Marshaler field that is nil.\n\t// It should be directly marshable.\n\tnmm := new(NMMessage)\n\tif _, err := Marshal(nmm); err != nil {\n\t\tt.Error(\"unexpected error marshaling nmm: \", err)\n\t}\n\n\t// Try a struct with a Marshaler field that is not nillable.\n\tnnim := new(NNIMessage)\n\tnnim.nni = 7\n\tvar _ Marshaler = nnim.nni // verify it is truly a Marshaler\n\tif _, err := Marshal(nnim); err != nil 
{\n\t\tt.Error(\"unexpected error marshaling nnim: \", err)\n\t}\n}\n\nfunc TestAllSetDefaults(t *testing.T) {\n\t// Exercise SetDefaults with all scalar field types.\n\tm := &Defaults{\n\t\t// NaN != NaN, so override that here.\n\t\tF_Nan: Float32(1.7),\n\t}\n\texpected := &Defaults{\n\t\tF_Bool:    Bool(true),\n\t\tF_Int32:   Int32(32),\n\t\tF_Int64:   Int64(64),\n\t\tF_Fixed32: Uint32(320),\n\t\tF_Fixed64: Uint64(640),\n\t\tF_Uint32:  Uint32(3200),\n\t\tF_Uint64:  Uint64(6400),\n\t\tF_Float:   Float32(314159),\n\t\tF_Double:  Float64(271828),\n\t\tF_String:  String(`hello, \"world!\"` + \"\\n\"),\n\t\tF_Bytes:   []byte(\"Bignose\"),\n\t\tF_Sint32:  Int32(-32),\n\t\tF_Sint64:  Int64(-64),\n\t\tF_Enum:    Defaults_GREEN.Enum(),\n\t\tF_Pinf:    Float32(float32(math.Inf(1))),\n\t\tF_Ninf:    Float32(float32(math.Inf(-1))),\n\t\tF_Nan:     Float32(1.7),\n\t\tStrZero:   String(\"\"),\n\t}\n\tSetDefaults(m)\n\tif !Equal(m, expected) {\n\t\tt.Errorf(\"SetDefaults failed\\n got %v\\nwant %v\", m, expected)\n\t}\n}\n\nfunc TestSetDefaultsWithSetField(t *testing.T) {\n\t// Check that a set value is not overridden.\n\tm := &Defaults{\n\t\tF_Int32: Int32(12),\n\t}\n\tSetDefaults(m)\n\tif v := m.GetF_Int32(); v != 12 {\n\t\tt.Errorf(\"m.FInt32 = %v, want 12\", v)\n\t}\n}\n\nfunc TestSetDefaultsWithSubMessage(t *testing.T) {\n\tm := &OtherMessage{\n\t\tKey: Int64(123),\n\t\tInner: &InnerMessage{\n\t\t\tHost: String(\"gopher\"),\n\t\t},\n\t}\n\texpected := &OtherMessage{\n\t\tKey: Int64(123),\n\t\tInner: &InnerMessage{\n\t\t\tHost: String(\"gopher\"),\n\t\t\tPort: Int32(4000),\n\t\t},\n\t}\n\tSetDefaults(m)\n\tif !Equal(m, expected) {\n\t\tt.Errorf(\"\\n got %v\\nwant %v\", m, expected)\n\t}\n}\n\nfunc TestSetDefaultsWithRepeatedSubMessage(t *testing.T) {\n\tm := &MyMessage{\n\t\tRepInner: []*InnerMessage{{}},\n\t}\n\texpected := &MyMessage{\n\t\tRepInner: []*InnerMessage{{\n\t\t\tPort: Int32(4000),\n\t\t}},\n\t}\n\tSetDefaults(m)\n\tif !Equal(m, expected) {\n\t\tt.Errorf(\"\\n 
got %v\\nwant %v\", m, expected)\n\t}\n}\n\nfunc TestSetDefaultWithRepeatedNonMessage(t *testing.T) {\n\tm := &MyMessage{\n\t\tPet: []string{\"turtle\", \"wombat\"},\n\t}\n\texpected := Clone(m)\n\tSetDefaults(m)\n\tif !Equal(m, expected) {\n\t\tt.Errorf(\"\\n got %v\\nwant %v\", m, expected)\n\t}\n}\n\nfunc TestMaximumTagNumber(t *testing.T) {\n\tm := &MaxTag{\n\t\tLastField: String(\"natural goat essence\"),\n\t}\n\tbuf, err := Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"proto.Marshal failed: %v\", err)\n\t}\n\tm2 := new(MaxTag)\n\tif err := Unmarshal(buf, m2); err != nil {\n\t\tt.Fatalf(\"proto.Unmarshal failed: %v\", err)\n\t}\n\tif got, want := m2.GetLastField(), *m.LastField; got != want {\n\t\tt.Errorf(\"got %q, want %q\", got, want)\n\t}\n}\n\nfunc TestJSON(t *testing.T) {\n\tm := &MyMessage{\n\t\tCount: Int32(4),\n\t\tPet:   []string{\"bunny\", \"kitty\"},\n\t\tInner: &InnerMessage{\n\t\t\tHost: String(\"cauchy\"),\n\t\t},\n\t\tBikeshed: MyMessage_GREEN.Enum(),\n\t}\n\tconst expected = `{\"count\":4,\"pet\":[\"bunny\",\"kitty\"],\"inner\":{\"host\":\"cauchy\"},\"bikeshed\":1}`\n\n\tb, err := json.Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"json.Marshal failed: %v\", err)\n\t}\n\ts := string(b)\n\tif s != expected {\n\t\tt.Errorf(\"got  %s\\nwant %s\", s, expected)\n\t}\n\n\treceived := new(MyMessage)\n\tif err := json.Unmarshal(b, received); err != nil {\n\t\tt.Fatalf(\"json.Unmarshal failed: %v\", err)\n\t}\n\tif !Equal(received, m) {\n\t\tt.Fatalf(\"got %s, want %s\", received, m)\n\t}\n\n\t// Test unmarshalling of JSON with symbolic enum name.\n\tconst old = `{\"count\":4,\"pet\":[\"bunny\",\"kitty\"],\"inner\":{\"host\":\"cauchy\"},\"bikeshed\":\"GREEN\"}`\n\treceived.Reset()\n\tif err := json.Unmarshal([]byte(old), received); err != nil {\n\t\tt.Fatalf(\"json.Unmarshal failed: %v\", err)\n\t}\n\tif !Equal(received, m) {\n\t\tt.Fatalf(\"got %s, want %s\", received, m)\n\t}\n}\n\nfunc TestBadWireType(t *testing.T) {\n\tb := []byte{7<<3 | 6} // field 
7, wire type 6\n\tpb := new(OtherMessage)\n\tif err := Unmarshal(b, pb); err == nil {\n\t\tt.Errorf(\"Unmarshal did not fail\")\n\t} else if !strings.Contains(err.Error(), \"unknown wire type\") {\n\t\tt.Errorf(\"wrong error: %v\", err)\n\t}\n}\n\nfunc TestBytesWithInvalidLength(t *testing.T) {\n\t// If a byte sequence has an invalid (negative) length, Unmarshal should not panic.\n\tb := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0}\n\tUnmarshal(b, new(MyMessage))\n}\n\nfunc TestLengthOverflow(t *testing.T) {\n\t// Overflowing a length should not panic.\n\tb := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01}\n\tUnmarshal(b, new(MyMessage))\n}\n\nfunc TestVarintOverflow(t *testing.T) {\n\t// Overflowing a 64-bit length should not be allowed.\n\tb := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}\n\tif err := Unmarshal(b, new(MyMessage)); err == nil {\n\t\tt.Fatalf(\"Overflowed uint64 length without error\")\n\t}\n}\n\nfunc TestUnmarshalFuzz(t *testing.T) {\n\tconst N = 1000\n\tseed := time.Now().UnixNano()\n\tt.Logf(\"RNG seed is %d\", seed)\n\trng := rand.New(rand.NewSource(seed))\n\tbuf := make([]byte, 20)\n\tfor i := 0; i < N; i++ {\n\t\tfor j := range buf {\n\t\t\tbuf[j] = byte(rng.Intn(256))\n\t\t}\n\t\tfuzzUnmarshal(t, buf)\n\t}\n}\n\nfunc TestMergeMessages(t *testing.T) {\n\tpb := &MessageList{Message: []*MessageList_Message{{Name: String(\"x\"), Count: Int32(1)}}}\n\tdata, err := Marshal(pb)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\n\tpb1 := new(MessageList)\n\tif err := Unmarshal(data, pb1); err != nil {\n\t\tt.Fatalf(\"first Unmarshal: %v\", err)\n\t}\n\tif err := Unmarshal(data, pb1); err != nil {\n\t\tt.Fatalf(\"second Unmarshal: %v\", err)\n\t}\n\tif len(pb1.Message) != 1 {\n\t\tt.Errorf(\"two Unmarshals produced %d Messages, want 1\", len(pb1.Message))\n\t}\n\n\tpb2 := new(MessageList)\n\tif 
err := UnmarshalMerge(data, pb2); err != nil {\n\t\tt.Fatalf(\"first UnmarshalMerge: %v\", err)\n\t}\n\tif err := UnmarshalMerge(data, pb2); err != nil {\n\t\tt.Fatalf(\"second UnmarshalMerge: %v\", err)\n\t}\n\tif len(pb2.Message) != 2 {\n\t\tt.Errorf(\"two UnmarshalMerges produced %d Messages, want 2\", len(pb2.Message))\n\t}\n}\n\nfunc TestExtensionMarshalOrder(t *testing.T) {\n\tm := &MyMessage{Count: Int(123)}\n\tif err := SetExtension(m, E_Ext_More, &Ext{Data: String(\"alpha\")}); err != nil {\n\t\tt.Fatalf(\"SetExtension: %v\", err)\n\t}\n\tif err := SetExtension(m, E_Ext_Text, String(\"aleph\")); err != nil {\n\t\tt.Fatalf(\"SetExtension: %v\", err)\n\t}\n\tif err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil {\n\t\tt.Fatalf(\"SetExtension: %v\", err)\n\t}\n\n\t// Serialize m several times, and check we get the same bytes each time.\n\tvar orig []byte\n\tfor i := 0; i < 100; i++ {\n\t\tb, err := Marshal(m)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Marshal: %v\", err)\n\t\t}\n\t\tif i == 0 {\n\t\t\torig = b\n\t\t\tcontinue\n\t\t}\n\t\tif !bytes.Equal(b, orig) {\n\t\t\tt.Errorf(\"Bytes differ on attempt #%d\", i)\n\t\t}\n\t}\n}\n\n// Many extensions, because small maps might not iterate differently on each iteration.\nvar exts = []*ExtensionDesc{\n\tE_X201,\n\tE_X202,\n\tE_X203,\n\tE_X204,\n\tE_X205,\n\tE_X206,\n\tE_X207,\n\tE_X208,\n\tE_X209,\n\tE_X210,\n\tE_X211,\n\tE_X212,\n\tE_X213,\n\tE_X214,\n\tE_X215,\n\tE_X216,\n\tE_X217,\n\tE_X218,\n\tE_X219,\n\tE_X220,\n\tE_X221,\n\tE_X222,\n\tE_X223,\n\tE_X224,\n\tE_X225,\n\tE_X226,\n\tE_X227,\n\tE_X228,\n\tE_X229,\n\tE_X230,\n\tE_X231,\n\tE_X232,\n\tE_X233,\n\tE_X234,\n\tE_X235,\n\tE_X236,\n\tE_X237,\n\tE_X238,\n\tE_X239,\n\tE_X240,\n\tE_X241,\n\tE_X242,\n\tE_X243,\n\tE_X244,\n\tE_X245,\n\tE_X246,\n\tE_X247,\n\tE_X248,\n\tE_X249,\n\tE_X250,\n}\n\nfunc TestMessageSetMarshalOrder(t *testing.T) {\n\tm := &MyMessageSet{}\n\tfor _, x := range exts {\n\t\tif err := SetExtension(m, x, &Empty{}); err != nil 
{\n\t\t\tt.Fatalf(\"SetExtension: %v\", err)\n\t\t}\n\t}\n\n\tbuf, err := Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\n\t// Serialize m several times, and check we get the same bytes each time.\n\tfor i := 0; i < 10; i++ {\n\t\tb1, err := Marshal(m)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Marshal: %v\", err)\n\t\t}\n\t\tif !bytes.Equal(b1, buf) {\n\t\t\tt.Errorf(\"Bytes differ on re-Marshal #%d\", i)\n\t\t}\n\n\t\tm2 := &MyMessageSet{}\n\t\tif err := Unmarshal(buf, m2); err != nil {\n\t\t\tt.Errorf(\"Unmarshal: %v\", err)\n\t\t}\n\t\tb2, err := Marshal(m2)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"re-Marshal: %v\", err)\n\t\t}\n\t\tif !bytes.Equal(b2, buf) {\n\t\t\tt.Errorf(\"Bytes differ on round-trip #%d\", i)\n\t\t}\n\t}\n}\n\nfunc TestUnmarshalMergesMessages(t *testing.T) {\n\t// If a nested message occurs twice in the input,\n\t// the fields should be merged when decoding.\n\ta := &OtherMessage{\n\t\tKey: Int64(123),\n\t\tInner: &InnerMessage{\n\t\t\tHost: String(\"polhode\"),\n\t\t\tPort: Int32(1234),\n\t\t},\n\t}\n\taData, err := Marshal(a)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal(a): %v\", err)\n\t}\n\tb := &OtherMessage{\n\t\tWeight: Float32(1.2),\n\t\tInner: &InnerMessage{\n\t\t\tHost:      String(\"herpolhode\"),\n\t\t\tConnected: Bool(true),\n\t\t},\n\t}\n\tbData, err := Marshal(b)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal(b): %v\", err)\n\t}\n\twant := &OtherMessage{\n\t\tKey:    Int64(123),\n\t\tWeight: Float32(1.2),\n\t\tInner: &InnerMessage{\n\t\t\tHost:      String(\"herpolhode\"),\n\t\t\tPort:      Int32(1234),\n\t\t\tConnected: Bool(true),\n\t\t},\n\t}\n\tgot := new(OtherMessage)\n\tif err := Unmarshal(append(aData, bData...), got); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %v\", err)\n\t}\n\tif !Equal(got, want) {\n\t\tt.Errorf(\"\\n got %v\\nwant %v\", got, want)\n\t}\n}\n\nfunc TestEncodingSizes(t *testing.T) {\n\ttests := []struct {\n\t\tm Message\n\t\tn int\n\t}{\n\t\t{&Defaults{F_Int32: Int32(math.MaxInt32)}, 
6},\n\t\t{&Defaults{F_Int32: Int32(math.MinInt32)}, 11},\n\t\t{&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6},\n\t\t{&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6},\n\t}\n\tfor _, test := range tests {\n\t\tb, err := Marshal(test.m)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Marshal(%v): %v\", test.m, err)\n\t\t\tcontinue\n\t\t}\n\t\tif len(b) != test.n {\n\t\t\tt.Errorf(\"Marshal(%v) yielded %d bytes, want %d bytes\", test.m, len(b), test.n)\n\t\t}\n\t}\n}\n\nfunc TestRequiredNotSetError(t *testing.T) {\n\tpb := initGoTest(false)\n\tpb.RequiredField.Label = nil\n\tpb.F_Int32Required = nil\n\tpb.F_Int64Required = nil\n\n\texpected := \"0807\" + // field 1, encoding 0, value 7\n\t\t\"2206\" + \"120474797065\" + // field 4, encoding 2 (GoTestField)\n\t\t\"5001\" + // field 10, encoding 0, value 1\n\t\t\"6d20000000\" + // field 13, encoding 5, value 0x20\n\t\t\"714000000000000000\" + // field 14, encoding 1, value 0x40\n\t\t\"78a019\" + // field 15, encoding 0, value 0xca0 = 3232\n\t\t\"8001c032\" + // field 16, encoding 0, value 0x1940 = 6464\n\t\t\"8d0100004a45\" + // field 17, encoding 5, value 3232.0\n\t\t\"9101000000000040b940\" + // field 18, encoding 1, value 6464.0\n\t\t\"9a0106\" + \"737472696e67\" + // field 19, encoding 2, string \"string\"\n\t\t\"b304\" + // field 70, encoding 3, start group\n\t\t\"ba0408\" + \"7265717569726564\" + // field 71, encoding 2, string \"required\"\n\t\t\"b404\" + // field 70, encoding 4, end group\n\t\t\"aa0605\" + \"6279746573\" + // field 101, encoding 2, string \"bytes\"\n\t\t\"b0063f\" + // field 102, encoding 0, 0x3f zigzag32\n\t\t\"b8067f\" // field 103, encoding 0, 0x7f zigzag64\n\n\to := old()\n\tbytes, err := Marshal(pb)\n\tif _, ok := err.(*RequiredNotSetError); !ok {\n\t\tfmt.Printf(\"marshal-1 err = %v, want *RequiredNotSetError\", err)\n\t\to.DebugPrint(\"\", bytes)\n\t\tt.Fatalf(\"expected = %s\", expected)\n\t}\n\tif strings.Index(err.Error(), \"RequiredField.Label\") < 0 {\n\t\tt.Errorf(\"marshal-1 
wrong err msg: %v\", err)\n\t}\n\tif !equal(bytes, expected, t) {\n\t\to.DebugPrint(\"neq 1\", bytes)\n\t\tt.Fatalf(\"expected = %s\", expected)\n\t}\n\n\t// Now test Unmarshal by recreating the original buffer.\n\tpbd := new(GoTest)\n\terr = Unmarshal(bytes, pbd)\n\tif _, ok := err.(*RequiredNotSetError); !ok {\n\t\tt.Fatalf(\"unmarshal err = %v, want *RequiredNotSetError\", err)\n\t\to.DebugPrint(\"\", bytes)\n\t\tt.Fatalf(\"string = %s\", expected)\n\t}\n\tif strings.Index(err.Error(), \"RequiredField.{Unknown}\") < 0 {\n\t\tt.Errorf(\"unmarshal wrong err msg: %v\", err)\n\t}\n\tbytes, err = Marshal(pbd)\n\tif _, ok := err.(*RequiredNotSetError); !ok {\n\t\tt.Errorf(\"marshal-2 err = %v, want *RequiredNotSetError\", err)\n\t\to.DebugPrint(\"\", bytes)\n\t\tt.Fatalf(\"string = %s\", expected)\n\t}\n\tif strings.Index(err.Error(), \"RequiredField.Label\") < 0 {\n\t\tt.Errorf(\"marshal-2 wrong err msg: %v\", err)\n\t}\n\tif !equal(bytes, expected, t) {\n\t\to.DebugPrint(\"neq 2\", bytes)\n\t\tt.Fatalf(\"string = %s\", expected)\n\t}\n}\n\nfunc fuzzUnmarshal(t *testing.T, data []byte) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tt.Errorf(\"These bytes caused a panic: %+v\", data)\n\t\t\tt.Logf(\"Stack:\\n%s\", debug.Stack())\n\t\t\tt.FailNow()\n\t\t}\n\t}()\n\n\tpb := new(MyMessage)\n\tUnmarshal(data, pb)\n}\n\nfunc TestMapFieldMarshal(t *testing.T) {\n\tm := &MessageWithMap{\n\t\tNameMapping: map[int32]string{\n\t\t\t1: \"Rob\",\n\t\t\t4: \"Ian\",\n\t\t\t8: \"Dave\",\n\t\t},\n\t}\n\tb, err := Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\n\t// b should be the concatenation of these three byte sequences in some order.\n\tparts := []string{\n\t\t\"\\n\\a\\b\\x01\\x12\\x03Rob\",\n\t\t\"\\n\\a\\b\\x04\\x12\\x03Ian\",\n\t\t\"\\n\\b\\b\\x08\\x12\\x04Dave\",\n\t}\n\tok := false\n\tfor i := range parts {\n\t\tfor j := range parts {\n\t\t\tif j == i {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor k := range parts {\n\t\t\t\tif k == i || k 
== j {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ttry := parts[i] + parts[j] + parts[k]\n\t\t\t\tif bytes.Equal(b, []byte(try)) {\n\t\t\t\t\tok = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif !ok {\n\t\tt.Fatalf(\"Incorrect Marshal output.\\n got %q\\nwant %q (or a permutation of that)\", b, parts[0]+parts[1]+parts[2])\n\t}\n\tt.Logf(\"FYI b: %q\", b)\n\n\t(new(Buffer)).DebugPrint(\"Dump of b\", b)\n}\n\nfunc TestMapFieldRoundTrips(t *testing.T) {\n\tm := &MessageWithMap{\n\t\tNameMapping: map[int32]string{\n\t\t\t1: \"Rob\",\n\t\t\t4: \"Ian\",\n\t\t\t8: \"Dave\",\n\t\t},\n\t\tMsgMapping: map[int64]*FloatingPoint{\n\t\t\t0x7001: &FloatingPoint{F: Float64(2.0)},\n\t\t},\n\t\tByteMapping: map[bool][]byte{\n\t\t\tfalse: []byte(\"that's not right!\"),\n\t\t\ttrue:  []byte(\"aye, 'tis true!\"),\n\t\t},\n\t}\n\tb, err := Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\tt.Logf(\"FYI b: %q\", b)\n\tm2 := new(MessageWithMap)\n\tif err := Unmarshal(b, m2); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %v\", err)\n\t}\n\tfor _, pair := range [][2]interface{}{\n\t\t{m.NameMapping, m2.NameMapping},\n\t\t{m.MsgMapping, m2.MsgMapping},\n\t\t{m.ByteMapping, m2.ByteMapping},\n\t} {\n\t\tif !reflect.DeepEqual(pair[0], pair[1]) {\n\t\t\tt.Errorf(\"Map did not survive a round trip.\\ninitial: %v\\n  final: %v\", pair[0], pair[1])\n\t\t}\n\t}\n}\n\n// Benchmarks\n\nfunc testMsg() *GoTest {\n\tpb := initGoTest(true)\n\tconst N = 1000 // Internally the library starts much smaller.\n\tpb.F_Int32Repeated = make([]int32, N)\n\tpb.F_DoubleRepeated = make([]float64, N)\n\tfor i := 0; i < N; i++ {\n\t\tpb.F_Int32Repeated[i] = int32(i)\n\t\tpb.F_DoubleRepeated[i] = float64(i)\n\t}\n\treturn pb\n}\n\nfunc bytesMsg() *GoTest {\n\tpb := initGoTest(true)\n\tbuf := make([]byte, 4000)\n\tfor i := range buf {\n\t\tbuf[i] = byte(i)\n\t}\n\tpb.F_BytesDefaulted = buf\n\treturn pb\n}\n\nfunc benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) 
{\n\td, _ := marshal(pb)\n\tb.SetBytes(int64(len(d)))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tmarshal(pb)\n\t}\n}\n\nfunc benchmarkBufferMarshal(b *testing.B, pb Message) {\n\tp := NewBuffer(nil)\n\tbenchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) {\n\t\tp.Reset()\n\t\terr := p.Marshal(pb0)\n\t\treturn p.Bytes(), err\n\t})\n}\n\nfunc benchmarkSize(b *testing.B, pb Message) {\n\tbenchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) {\n\t\tSize(pb)\n\t\treturn nil, nil\n\t})\n}\n\nfunc newOf(pb Message) Message {\n\tin := reflect.ValueOf(pb)\n\tif in.IsNil() {\n\t\treturn pb\n\t}\n\treturn reflect.New(in.Type().Elem()).Interface().(Message)\n}\n\nfunc benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) {\n\td, _ := Marshal(pb)\n\tb.SetBytes(int64(len(d)))\n\tpbd := newOf(pb)\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tunmarshal(d, pbd)\n\t}\n}\n\nfunc benchmarkBufferUnmarshal(b *testing.B, pb Message) {\n\tp := NewBuffer(nil)\n\tbenchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error {\n\t\tp.SetBuf(d)\n\t\treturn p.Unmarshal(pb0)\n\t})\n}\n\n// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes}\n\nfunc BenchmarkMarshal(b *testing.B) {\n\tbenchmarkMarshal(b, testMsg(), Marshal)\n}\n\nfunc BenchmarkBufferMarshal(b *testing.B) {\n\tbenchmarkBufferMarshal(b, testMsg())\n}\n\nfunc BenchmarkSize(b *testing.B) {\n\tbenchmarkSize(b, testMsg())\n}\n\nfunc BenchmarkUnmarshal(b *testing.B) {\n\tbenchmarkUnmarshal(b, testMsg(), Unmarshal)\n}\n\nfunc BenchmarkBufferUnmarshal(b *testing.B) {\n\tbenchmarkBufferUnmarshal(b, testMsg())\n}\n\nfunc BenchmarkMarshalBytes(b *testing.B) {\n\tbenchmarkMarshal(b, bytesMsg(), Marshal)\n}\n\nfunc BenchmarkBufferMarshalBytes(b *testing.B) {\n\tbenchmarkBufferMarshal(b, bytesMsg())\n}\n\nfunc BenchmarkSizeBytes(b *testing.B) {\n\tbenchmarkSize(b, bytesMsg())\n}\n\nfunc BenchmarkUnmarshalBytes(b *testing.B) {\n\tbenchmarkUnmarshal(b, bytesMsg(), 
Unmarshal)\n}\n\nfunc BenchmarkBufferUnmarshalBytes(b *testing.B) {\n\tbenchmarkBufferUnmarshal(b, bytesMsg())\n}\n\nfunc BenchmarkUnmarshalUnrecognizedFields(b *testing.B) {\n\tb.StopTimer()\n\tpb := initGoTestField()\n\tskip := &GoSkipTest{\n\t\tSkipInt32:   Int32(32),\n\t\tSkipFixed32: Uint32(3232),\n\t\tSkipFixed64: Uint64(6464),\n\t\tSkipString:  String(\"skipper\"),\n\t\tSkipgroup: &GoSkipTest_SkipGroup{\n\t\t\tGroupInt32:  Int32(75),\n\t\t\tGroupString: String(\"wxyz\"),\n\t\t},\n\t}\n\n\tpbd := new(GoTestField)\n\tp := NewBuffer(nil)\n\tp.Marshal(pb)\n\tp.Marshal(skip)\n\tp2 := NewBuffer(nil)\n\n\tb.StartTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tp2.SetBuf(p.Bytes())\n\t\tp2.Unmarshal(pbd)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/clone.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2011 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Protocol buffer deep copy and merge.\n// TODO: MessageSet and RawMessage.\n\npackage proto\n\nimport (\n\t\"log\"\n\t\"reflect\"\n\t\"strings\"\n)\n\n// Clone returns a deep copy of a protocol buffer.\nfunc Clone(pb Message) Message {\n\tin := reflect.ValueOf(pb)\n\tif in.IsNil() {\n\t\treturn pb\n\t}\n\n\tout := reflect.New(in.Type().Elem())\n\t// out is empty so a merge is a deep copy.\n\tmergeStruct(out.Elem(), in.Elem())\n\treturn out.Interface().(Message)\n}\n\n// Merge merges src into dst.\n// Required and optional fields that are set in src will be set to that value in dst.\n// Elements of repeated fields will be appended.\n// Merge panics if src and dst are not the same type, or if dst is nil.\nfunc Merge(dst, src Message) {\n\tin := reflect.ValueOf(src)\n\tout := reflect.ValueOf(dst)\n\tif out.IsNil() {\n\t\tpanic(\"proto: nil destination\")\n\t}\n\tif in.Type() != out.Type() {\n\t\t// Explicit test prior to mergeStruct so that mistyped nils will fail\n\t\tpanic(\"proto: type mismatch\")\n\t}\n\tif in.IsNil() {\n\t\t// Merging nil into non-nil is a quiet no-op\n\t\treturn\n\t}\n\tmergeStruct(out.Elem(), in.Elem())\n}\n\nfunc mergeStruct(out, in reflect.Value) {\n\tfor i := 0; i < in.NumField(); i++ {\n\t\tf := in.Type().Field(i)\n\t\tif strings.HasPrefix(f.Name, \"XXX_\") {\n\t\t\tcontinue\n\t\t}\n\t\tmergeAny(out.Field(i), in.Field(i))\n\t}\n\n\tif emIn, ok := in.Addr().Interface().(extendableProto); ok {\n\t\temOut 
:= out.Addr().Interface().(extendableProto)\n\t\tmergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())\n\t}\n\n\tuf := in.FieldByName(\"XXX_unrecognized\")\n\tif !uf.IsValid() {\n\t\treturn\n\t}\n\tuin := uf.Bytes()\n\tif len(uin) > 0 {\n\t\tout.FieldByName(\"XXX_unrecognized\").SetBytes(append([]byte(nil), uin...))\n\t}\n}\n\nfunc mergeAny(out, in reflect.Value) {\n\tif in.Type() == protoMessageType {\n\t\tif !in.IsNil() {\n\t\t\tif out.IsNil() {\n\t\t\t\tout.Set(reflect.ValueOf(Clone(in.Interface().(Message))))\n\t\t\t} else {\n\t\t\t\tMerge(out.Interface().(Message), in.Interface().(Message))\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tswitch in.Kind() {\n\tcase reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,\n\t\treflect.String, reflect.Uint32, reflect.Uint64:\n\t\tout.Set(in)\n\tcase reflect.Map:\n\t\tif in.Len() == 0 {\n\t\t\treturn\n\t\t}\n\t\tif out.IsNil() {\n\t\t\tout.Set(reflect.MakeMap(in.Type()))\n\t\t}\n\t\t// For maps with value types of *T or []byte we need to deep copy each value.\n\t\telemKind := in.Type().Elem().Kind()\n\t\tfor _, key := range in.MapKeys() {\n\t\t\tvar val reflect.Value\n\t\t\tswitch elemKind {\n\t\t\tcase reflect.Ptr:\n\t\t\t\tval = reflect.New(in.Type().Elem().Elem())\n\t\t\t\tmergeAny(val, in.MapIndex(key))\n\t\t\tcase reflect.Slice:\n\t\t\t\tval = in.MapIndex(key)\n\t\t\t\tval = reflect.ValueOf(append([]byte{}, val.Bytes()...))\n\t\t\tdefault:\n\t\t\t\tval = in.MapIndex(key)\n\t\t\t}\n\t\t\tout.SetMapIndex(key, val)\n\t\t}\n\tcase reflect.Ptr:\n\t\tif in.IsNil() {\n\t\t\treturn\n\t\t}\n\t\tif out.IsNil() {\n\t\t\tout.Set(reflect.New(in.Elem().Type()))\n\t\t}\n\t\tmergeAny(out.Elem(), in.Elem())\n\tcase reflect.Slice:\n\t\tif in.IsNil() {\n\t\t\treturn\n\t\t}\n\t\tif in.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\t// []byte is a scalar bytes field, not a repeated field.\n\t\t\t// Make a deep copy.\n\t\t\t// Append to []byte{} instead of []byte(nil) so that we never end up\n\t\t\t// with a nil 
result.\n\t\t\tout.SetBytes(append([]byte{}, in.Bytes()...))\n\t\t\treturn\n\t\t}\n\t\tn := in.Len()\n\t\tif out.IsNil() {\n\t\t\tout.Set(reflect.MakeSlice(in.Type(), 0, n))\n\t\t}\n\t\tswitch in.Type().Elem().Kind() {\n\t\tcase reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,\n\t\t\treflect.String, reflect.Uint32, reflect.Uint64:\n\t\t\tout.Set(reflect.AppendSlice(out, in))\n\t\tdefault:\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tx := reflect.Indirect(reflect.New(in.Type().Elem()))\n\t\t\t\tmergeAny(x, in.Index(i))\n\t\t\t\tout.Set(reflect.Append(out, x))\n\t\t\t}\n\t\t}\n\tcase reflect.Struct:\n\t\tmergeStruct(out, in)\n\tdefault:\n\t\t// unknown type, so not a protocol buffer\n\t\tlog.Printf(\"proto: don't know how to copy %v\", in)\n\t}\n}\n\nfunc mergeExtension(out, in map[int32]Extension) {\n\tfor extNum, eIn := range in {\n\t\teOut := Extension{desc: eIn.desc}\n\t\tif eIn.value != nil {\n\t\t\tv := reflect.New(reflect.TypeOf(eIn.value)).Elem()\n\t\t\tmergeAny(v, reflect.ValueOf(eIn.value))\n\t\t\teOut.value = v.Interface()\n\t\t}\n\t\tif eIn.enc != nil {\n\t\t\teOut.enc = make([]byte, len(eIn.enc))\n\t\t\tcopy(eOut.enc, eIn.enc)\n\t\t}\n\n\t\tout[extNum] = eOut\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/clone_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2011 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\tpb \"github.com/golang/protobuf/proto/testdata\"\n)\n\nvar cloneTestMessage = &pb.MyMessage{\n\tCount: proto.Int32(42),\n\tName:  proto.String(\"Dave\"),\n\tPet:   []string{\"bunny\", \"kitty\", \"horsey\"},\n\tInner: &pb.InnerMessage{\n\t\tHost:      proto.String(\"niles\"),\n\t\tPort:      proto.Int32(9099),\n\t\tConnected: proto.Bool(true),\n\t},\n\tOthers: []*pb.OtherMessage{\n\t\t{\n\t\t\tValue: []byte(\"some bytes\"),\n\t\t},\n\t},\n\tSomegroup: &pb.MyMessage_SomeGroup{\n\t\tGroupField: proto.Int32(6),\n\t},\n\tRepBytes: [][]byte{[]byte(\"sham\"), []byte(\"wow\")},\n}\n\nfunc init() {\n\text := &pb.Ext{\n\t\tData: proto.String(\"extension\"),\n\t}\n\tif err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {\n\t\tpanic(\"SetExtension: \" + err.Error())\n\t}\n}\n\nfunc TestClone(t *testing.T) {\n\tm := proto.Clone(cloneTestMessage).(*pb.MyMessage)\n\tif !proto.Equal(m, cloneTestMessage) {\n\t\tt.Errorf(\"Clone(%v) = %v\", cloneTestMessage, m)\n\t}\n\n\t// Verify it was a deep copy.\n\t*m.Inner.Port++\n\tif proto.Equal(m, cloneTestMessage) {\n\t\tt.Error(\"Mutating clone changed the original\")\n\t}\n\t// Byte fields and repeated fields should be copied.\n\tif &m.Pet[0] == &cloneTestMessage.Pet[0] {\n\t\tt.Error(\"Pet: repeated field not copied\")\n\t}\n\tif &m.Others[0] == &cloneTestMessage.Others[0] 
{\n\t\tt.Error(\"Others: repeated field not copied\")\n\t}\n\tif &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] {\n\t\tt.Error(\"Others[0].Value: bytes field not copied\")\n\t}\n\tif &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] {\n\t\tt.Error(\"RepBytes: repeated field not copied\")\n\t}\n\tif &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] {\n\t\tt.Error(\"RepBytes[0]: bytes field not copied\")\n\t}\n}\n\nfunc TestCloneNil(t *testing.T) {\n\tvar m *pb.MyMessage\n\tif c := proto.Clone(m); !proto.Equal(m, c) {\n\t\tt.Errorf(\"Clone(%v) = %v\", m, c)\n\t}\n}\n\nvar mergeTests = []struct {\n\tsrc, dst, want proto.Message\n}{\n\t{\n\t\tsrc: &pb.MyMessage{\n\t\t\tCount: proto.Int32(42),\n\t\t},\n\t\tdst: &pb.MyMessage{\n\t\t\tName: proto.String(\"Dave\"),\n\t\t},\n\t\twant: &pb.MyMessage{\n\t\t\tCount: proto.Int32(42),\n\t\t\tName:  proto.String(\"Dave\"),\n\t\t},\n\t},\n\t{\n\t\tsrc: &pb.MyMessage{\n\t\t\tInner: &pb.InnerMessage{\n\t\t\t\tHost:      proto.String(\"hey\"),\n\t\t\t\tConnected: proto.Bool(true),\n\t\t\t},\n\t\t\tPet: []string{\"horsey\"},\n\t\t\tOthers: []*pb.OtherMessage{\n\t\t\t\t{\n\t\t\t\t\tValue: []byte(\"some bytes\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tdst: &pb.MyMessage{\n\t\t\tInner: &pb.InnerMessage{\n\t\t\t\tHost: proto.String(\"niles\"),\n\t\t\t\tPort: proto.Int32(9099),\n\t\t\t},\n\t\t\tPet: []string{\"bunny\", \"kitty\"},\n\t\t\tOthers: []*pb.OtherMessage{\n\t\t\t\t{\n\t\t\t\t\tKey: proto.Int64(31415926535),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t// Explicitly test a src=nil field\n\t\t\t\t\tInner: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: &pb.MyMessage{\n\t\t\tInner: &pb.InnerMessage{\n\t\t\t\tHost:      proto.String(\"hey\"),\n\t\t\t\tConnected: proto.Bool(true),\n\t\t\t\tPort:      proto.Int32(9099),\n\t\t\t},\n\t\t\tPet: []string{\"bunny\", \"kitty\", \"horsey\"},\n\t\t\tOthers: []*pb.OtherMessage{\n\t\t\t\t{\n\t\t\t\t\tKey: proto.Int64(31415926535),\n\t\t\t\t},\n\t\t\t\t{},\n\t\t\t\t{\n\t\t\t\t\tValue: 
[]byte(\"some bytes\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tsrc: &pb.MyMessage{\n\t\t\tRepBytes: [][]byte{[]byte(\"wow\")},\n\t\t},\n\t\tdst: &pb.MyMessage{\n\t\t\tSomegroup: &pb.MyMessage_SomeGroup{\n\t\t\t\tGroupField: proto.Int32(6),\n\t\t\t},\n\t\t\tRepBytes: [][]byte{[]byte(\"sham\")},\n\t\t},\n\t\twant: &pb.MyMessage{\n\t\t\tSomegroup: &pb.MyMessage_SomeGroup{\n\t\t\t\tGroupField: proto.Int32(6),\n\t\t\t},\n\t\t\tRepBytes: [][]byte{[]byte(\"sham\"), []byte(\"wow\")},\n\t\t},\n\t},\n\t// Check that a scalar bytes field replaces rather than appends.\n\t{\n\t\tsrc:  &pb.OtherMessage{Value: []byte(\"foo\")},\n\t\tdst:  &pb.OtherMessage{Value: []byte(\"bar\")},\n\t\twant: &pb.OtherMessage{Value: []byte(\"foo\")},\n\t},\n\t{\n\t\tsrc: &pb.MessageWithMap{\n\t\t\tNameMapping: map[int32]string{6: \"Nigel\"},\n\t\t\tMsgMapping: map[int64]*pb.FloatingPoint{\n\t\t\t\t0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},\n\t\t\t},\n\t\t\tByteMapping: map[bool][]byte{true: []byte(\"wowsa\")},\n\t\t},\n\t\tdst: &pb.MessageWithMap{\n\t\t\tNameMapping: map[int32]string{\n\t\t\t\t6: \"Bruce\", // should be overwritten\n\t\t\t\t7: \"Andrew\",\n\t\t\t},\n\t\t},\n\t\twant: &pb.MessageWithMap{\n\t\t\tNameMapping: map[int32]string{\n\t\t\t\t6: \"Nigel\",\n\t\t\t\t7: \"Andrew\",\n\t\t\t},\n\t\t\tMsgMapping: map[int64]*pb.FloatingPoint{\n\t\t\t\t0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},\n\t\t\t},\n\t\t\tByteMapping: map[bool][]byte{true: []byte(\"wowsa\")},\n\t\t},\n\t},\n}\n\nfunc TestMerge(t *testing.T) {\n\tfor _, m := range mergeTests {\n\t\tgot := proto.Clone(m.dst)\n\t\tproto.Merge(got, m.src)\n\t\tif !proto.Equal(got, m.want) {\n\t\t\tt.Errorf(\"Merge(%v, %v)\\n got %v\\nwant %v\\n\", m.dst, m.src, got, m.want)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/decode.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n/*\n * Routines for decoding protocol buffer data to construct in-memory representations.\n */\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n)\n\n// errOverflow is returned when an integer is too large to be represented.\nvar errOverflow = errors.New(\"proto: integer overflow\")\n\n// The fundamental decoders that interpret bytes on the wire.\n// Those that take integer types all return uint64 and are\n// therefore of type valueDecoder.\n\n// DecodeVarint reads a varint-encoded integer from the slice.\n// It returns the integer and the number of bytes consumed, or\n// zero if there is not enough.\n// This is the format for the\n// int32, int64, uint32, uint64, bool, and enum\n// protocol buffer types.\nfunc DecodeVarint(buf []byte) (x uint64, n int) {\n\t// x, n already 0\n\tfor shift := uint(0); shift < 64; shift += 7 {\n\t\tif n >= len(buf) {\n\t\t\treturn 0, 0\n\t\t}\n\t\tb := uint64(buf[n])\n\t\tn++\n\t\tx |= (b & 0x7F) << shift\n\t\tif (b & 0x80) == 0 {\n\t\t\treturn x, n\n\t\t}\n\t}\n\n\t// The number is too large to represent in a 64-bit value.\n\treturn 0, 0\n}\n\n// DecodeVarint reads a varint-encoded integer from the Buffer.\n// This is the format for the\n// int32, int64, uint32, uint64, bool, and enum\n// protocol buffer types.\nfunc (p *Buffer) DecodeVarint() (x uint64, err error) {\n\t// x, err already 0\n\n\ti := p.index\n\tl := len(p.buf)\n\n\tfor shift := uint(0); shift 
< 64; shift += 7 {\n\t\tif i >= l {\n\t\t\terr = io.ErrUnexpectedEOF\n\t\t\treturn\n\t\t}\n\t\tb := p.buf[i]\n\t\ti++\n\t\tx |= (uint64(b) & 0x7F) << shift\n\t\tif b < 0x80 {\n\t\t\tp.index = i\n\t\t\treturn\n\t\t}\n\t}\n\n\t// The number is too large to represent in a 64-bit value.\n\terr = errOverflow\n\treturn\n}\n\n// DecodeFixed64 reads a 64-bit integer from the Buffer.\n// This is the format for the\n// fixed64, sfixed64, and double protocol buffer types.\nfunc (p *Buffer) DecodeFixed64() (x uint64, err error) {\n\t// x, err already 0\n\ti := p.index + 8\n\tif i < 0 || i > len(p.buf) {\n\t\terr = io.ErrUnexpectedEOF\n\t\treturn\n\t}\n\tp.index = i\n\n\tx = uint64(p.buf[i-8])\n\tx |= uint64(p.buf[i-7]) << 8\n\tx |= uint64(p.buf[i-6]) << 16\n\tx |= uint64(p.buf[i-5]) << 24\n\tx |= uint64(p.buf[i-4]) << 32\n\tx |= uint64(p.buf[i-3]) << 40\n\tx |= uint64(p.buf[i-2]) << 48\n\tx |= uint64(p.buf[i-1]) << 56\n\treturn\n}\n\n// DecodeFixed32 reads a 32-bit integer from the Buffer.\n// This is the format for the\n// fixed32, sfixed32, and float protocol buffer types.\nfunc (p *Buffer) DecodeFixed32() (x uint64, err error) {\n\t// x, err already 0\n\ti := p.index + 4\n\tif i < 0 || i > len(p.buf) {\n\t\terr = io.ErrUnexpectedEOF\n\t\treturn\n\t}\n\tp.index = i\n\n\tx = uint64(p.buf[i-4])\n\tx |= uint64(p.buf[i-3]) << 8\n\tx |= uint64(p.buf[i-2]) << 16\n\tx |= uint64(p.buf[i-1]) << 24\n\treturn\n}\n\n// DecodeZigzag64 reads a zigzag-encoded 64-bit integer\n// from the Buffer.\n// This is the format used for the sint64 protocol buffer type.\nfunc (p *Buffer) DecodeZigzag64() (x uint64, err error) {\n\tx, err = p.DecodeVarint()\n\tif err != nil {\n\t\treturn\n\t}\n\tx = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)\n\treturn\n}\n\n// DecodeZigzag32 reads a zigzag-encoded 32-bit integer\n// from  the Buffer.\n// This is the format used for the sint32 protocol buffer type.\nfunc (p *Buffer) DecodeZigzag32() (x uint64, err error) {\n\tx, err = p.DecodeVarint()\n\tif err != nil 
{\n\t\treturn\n\t}\n\tx = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))\n\treturn\n}\n\n// These are not ValueDecoders: they produce an array of bytes or a string.\n// bytes, embedded messages\n\n// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.\n// This is the format used for the bytes protocol buffer\n// type and for embedded messages.\nfunc (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {\n\tn, err := p.DecodeVarint()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnb := int(n)\n\tif nb < 0 {\n\t\treturn nil, fmt.Errorf(\"proto: bad byte length %d\", nb)\n\t}\n\tend := p.index + nb\n\tif end < p.index || end > len(p.buf) {\n\t\treturn nil, io.ErrUnexpectedEOF\n\t}\n\n\tif !alloc {\n\t\t// todo: check if can get more uses of alloc=false\n\t\tbuf = p.buf[p.index:end]\n\t\tp.index += nb\n\t\treturn\n\t}\n\n\tbuf = make([]byte, nb)\n\tcopy(buf, p.buf[p.index:])\n\tp.index += nb\n\treturn\n}\n\n// DecodeStringBytes reads an encoded string from the Buffer.\n// This is the format used for the proto2 string type.\nfunc (p *Buffer) DecodeStringBytes() (s string, err error) {\n\tbuf, err := p.DecodeRawBytes(false)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn string(buf), nil\n}\n\n// Skip the next item in the buffer. 
Its wire type is decoded and presented as an argument.\n// If the protocol buffer has extensions, and the field matches, add it as an extension.\n// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.\nfunc (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {\n\toi := o.index\n\n\terr := o.skip(t, tag, wire)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !unrecField.IsValid() {\n\t\treturn nil\n\t}\n\n\tptr := structPointer_Bytes(base, unrecField)\n\n\t// Add the skipped field to struct field\n\tobuf := o.buf\n\n\to.buf = *ptr\n\to.EncodeVarint(uint64(tag<<3 | wire))\n\t*ptr = append(o.buf, obuf[oi:o.index]...)\n\n\to.buf = obuf\n\n\treturn nil\n}\n\n// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.\nfunc (o *Buffer) skip(t reflect.Type, tag, wire int) error {\n\n\tvar u uint64\n\tvar err error\n\n\tswitch wire {\n\tcase WireVarint:\n\t\t_, err = o.DecodeVarint()\n\tcase WireFixed64:\n\t\t_, err = o.DecodeFixed64()\n\tcase WireBytes:\n\t\t_, err = o.DecodeRawBytes(false)\n\tcase WireFixed32:\n\t\t_, err = o.DecodeFixed32()\n\tcase WireStartGroup:\n\t\tfor {\n\t\t\tu, err = o.DecodeVarint()\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfwire := int(u & 0x7)\n\t\t\tif fwire == WireEndGroup {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tftag := int(u >> 3)\n\t\t\terr = o.skip(t, ftag, fwire)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\tdefault:\n\t\terr = fmt.Errorf(\"proto: can't skip unknown wire type %d for %s\", wire, t)\n\t}\n\treturn err\n}\n\n// Unmarshaler is the interface representing objects that can\n// unmarshal themselves.  The method should reset the receiver before\n// decoding starts.  
The argument points to data that may be\n// overwritten, so implementations should not keep references to the\n// buffer.\ntype Unmarshaler interface {\n\tUnmarshal([]byte) error\n}\n\n// Unmarshal parses the protocol buffer representation in buf and places the\n// decoded result in pb.  If the struct underlying pb does not match\n// the data in buf, the results can be unpredictable.\n//\n// Unmarshal resets pb before starting to unmarshal, so any\n// existing data in pb is always removed. Use UnmarshalMerge\n// to preserve and append to existing data.\nfunc Unmarshal(buf []byte, pb Message) error {\n\tpb.Reset()\n\treturn UnmarshalMerge(buf, pb)\n}\n\n// UnmarshalMerge parses the protocol buffer representation in buf and\n// writes the decoded result to pb.  If the struct underlying pb does not match\n// the data in buf, the results can be unpredictable.\n//\n// UnmarshalMerge merges into existing data in pb.\n// Most code should use Unmarshal instead.\nfunc UnmarshalMerge(buf []byte, pb Message) error {\n\t// If the object can unmarshal itself, let it.\n\tif u, ok := pb.(Unmarshaler); ok {\n\t\treturn u.Unmarshal(buf)\n\t}\n\treturn NewBuffer(buf).Unmarshal(pb)\n}\n\n// Unmarshal parses the protocol buffer representation in the\n// Buffer and places the decoded result in pb.  
If the struct\n// underlying pb does not match the data in the buffer, the results can be\n// unpredictable.\nfunc (p *Buffer) Unmarshal(pb Message) error {\n\t// If the object can unmarshal itself, let it.\n\tif u, ok := pb.(Unmarshaler); ok {\n\t\terr := u.Unmarshal(p.buf[p.index:])\n\t\tp.index = len(p.buf)\n\t\treturn err\n\t}\n\n\ttyp, base, err := getbase(pb)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)\n\n\tif collectStats {\n\t\tstats.Decode++\n\t}\n\n\treturn err\n}\n\n// unmarshalType does the work of unmarshaling a structure.\nfunc (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {\n\tvar state errorState\n\trequired, reqFields := prop.reqCount, uint64(0)\n\n\tvar err error\n\tfor err == nil && o.index < len(o.buf) {\n\t\toi := o.index\n\t\tvar u uint64\n\t\tu, err = o.DecodeVarint()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\twire := int(u & 0x7)\n\t\tif wire == WireEndGroup {\n\t\t\tif is_group {\n\t\t\t\treturn nil // input is satisfied\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"proto: %s: wiretype end group for non-group\", st)\n\t\t}\n\t\ttag := int(u >> 3)\n\t\tif tag <= 0 {\n\t\t\treturn fmt.Errorf(\"proto: %s: illegal tag %d (wire type %d)\", st, tag, wire)\n\t\t}\n\t\tfieldnum, ok := prop.decoderTags.get(tag)\n\t\tif !ok {\n\t\t\t// Maybe it's an extension?\n\t\t\tif prop.extendable {\n\t\t\t\tif e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {\n\t\t\t\t\tif err = o.skip(st, tag, wire); err == nil {\n\t\t\t\t\t\text := e.ExtensionMap()[int32(tag)] // may be missing\n\t\t\t\t\t\text.enc = append(ext.enc, o.buf[oi:o.index]...)\n\t\t\t\t\t\te.ExtensionMap()[int32(tag)] = ext\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\terr = o.skipAndSave(st, tag, wire, base, prop.unrecField)\n\t\t\tcontinue\n\t\t}\n\t\tp := prop.Prop[fieldnum]\n\n\t\tif p.dec == nil 
{\n\t\t\tfmt.Fprintf(os.Stderr, \"proto: no protobuf decoder for %s.%s\\n\", st, st.Field(fieldnum).Name)\n\t\t\tcontinue\n\t\t}\n\t\tdec := p.dec\n\t\tif wire != WireStartGroup && wire != p.WireType {\n\t\t\tif wire == WireBytes && p.packedDec != nil {\n\t\t\t\t// a packable field\n\t\t\t\tdec = p.packedDec\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"proto: bad wiretype for field %s.%s: got wiretype %d, want %d\", st, st.Field(fieldnum).Name, wire, p.WireType)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tdecErr := dec(o, p, base)\n\t\tif decErr != nil && !state.shouldContinue(decErr, p) {\n\t\t\terr = decErr\n\t\t}\n\t\tif err == nil && p.Required {\n\t\t\t// Successfully decoded a required field.\n\t\t\tif tag <= 64 {\n\t\t\t\t// use bitmap for fields 1-64 to catch field reuse.\n\t\t\t\tvar mask uint64 = 1 << uint64(tag-1)\n\t\t\t\tif reqFields&mask == 0 {\n\t\t\t\t\t// new required field\n\t\t\t\t\treqFields |= mask\n\t\t\t\t\trequired--\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// This is imprecise. It can be fooled by a required field\n\t\t\t\t// with a tag > 64 that is encoded twice; that's very rare.\n\t\t\t\t// A fully correct implementation would require allocating\n\t\t\t\t// a data structure, which we would like to avoid.\n\t\t\t\trequired--\n\t\t\t}\n\t\t}\n\t}\n\tif err == nil {\n\t\tif is_group {\n\t\t\treturn io.ErrUnexpectedEOF\n\t\t}\n\t\tif state.err != nil {\n\t\t\treturn state.err\n\t\t}\n\t\tif required > 0 {\n\t\t\t// Not enough information to determine the exact field. 
If we use extra\n\t\t\t// CPU, we could determine the field only if the missing required field\n\t\t\t// has a tag <= 64 and we check reqFields.\n\t\t\treturn &RequiredNotSetError{\"{Unknown}\"}\n\t\t}\n\t}\n\treturn err\n}\n\n// Individual type decoders\n// For each,\n//\tu is the decoded value,\n//\tv is a pointer to the field (pointer) in the struct\n\n// Sizes of the pools to allocate inside the Buffer.\n// The goal is modest amortization and allocation\n// on at least 16-byte boundaries.\nconst (\n\tboolPoolSize   = 16\n\tuint32PoolSize = 8\n\tuint64PoolSize = 4\n)\n\n// Decode a bool.\nfunc (o *Buffer) dec_bool(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(o.bools) == 0 {\n\t\to.bools = make([]bool, boolPoolSize)\n\t}\n\to.bools[0] = u != 0\n\t*structPointer_Bool(base, p.field) = &o.bools[0]\n\to.bools = o.bools[1:]\n\treturn nil\n}\n\nfunc (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*structPointer_BoolVal(base, p.field) = u != 0\n\treturn nil\n}\n\n// Decode an int32.\nfunc (o *Buffer) dec_int32(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tword32_Set(structPointer_Word32(base, p.field), o, uint32(u))\n\treturn nil\n}\n\nfunc (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tword32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))\n\treturn nil\n}\n\n// Decode an int64.\nfunc (o *Buffer) dec_int64(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tword64_Set(structPointer_Word64(base, p.field), o, u)\n\treturn nil\n}\n\nfunc (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tword64Val_Set(structPointer_Word64Val(base, p.field), o, u)\n\treturn nil\n}\n\n// Decode a string.\nfunc (o *Buffer) dec_string(p *Properties, base structPointer) error {\n\ts, err := o.DecodeStringBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\t*structPointer_String(base, p.field) = &s\n\treturn nil\n}\n\nfunc (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {\n\ts, err := o.DecodeStringBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\t*structPointer_StringVal(base, p.field) = s\n\treturn nil\n}\n\n// Decode a slice of bytes ([]byte).\nfunc (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {\n\tb, err := o.DecodeRawBytes(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*structPointer_Bytes(base, p.field) = b\n\treturn nil\n}\n\n// Decode a slice of bools ([]bool).\nfunc (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := structPointer_BoolSlice(base, p.field)\n\t*v = append(*v, u != 0)\n\treturn nil\n}\n\n// Decode a slice of bools ([]bool) in packed format.\nfunc (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {\n\tv := structPointer_BoolSlice(base, p.field)\n\n\tnn, err := o.DecodeVarint()\n\tif err != nil {\n\t\treturn err\n\t}\n\tnb := int(nn) // number of bytes of encoded bools\n\n\ty := *v\n\tfor i := 0; i < nb; i++ {\n\t\tu, err := p.valDec(o)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ty = append(y, u != 0)\n\t}\n\n\t*v = y\n\treturn nil\n}\n\n// Decode a slice of int32s ([]int32).\nfunc (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tstructPointer_Word32Slice(base, p.field).Append(uint32(u))\n\treturn nil\n}\n\n// Decode a slice of int32s ([]int32) in packed format.\nfunc (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {\n\tv := structPointer_Word32Slice(base, 
p.field)\n\n\tnn, err := o.DecodeVarint()\n\tif err != nil {\n\t\treturn err\n\t}\n\tnb := int(nn) // number of bytes of encoded int32s\n\n\tfin := o.index + nb\n\tif fin < o.index {\n\t\treturn errOverflow\n\t}\n\tfor o.index < fin {\n\t\tu, err := p.valDec(o)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tv.Append(uint32(u))\n\t}\n\treturn nil\n}\n\n// Decode a slice of int64s ([]int64).\nfunc (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstructPointer_Word64Slice(base, p.field).Append(u)\n\treturn nil\n}\n\n// Decode a slice of int64s ([]int64) in packed format.\nfunc (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {\n\tv := structPointer_Word64Slice(base, p.field)\n\n\tnn, err := o.DecodeVarint()\n\tif err != nil {\n\t\treturn err\n\t}\n\tnb := int(nn) // number of bytes of encoded int64s\n\n\tfin := o.index + nb\n\tif fin < o.index {\n\t\treturn errOverflow\n\t}\n\tfor o.index < fin {\n\t\tu, err := p.valDec(o)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tv.Append(u)\n\t}\n\treturn nil\n}\n\n// Decode a slice of strings ([]string).\nfunc (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {\n\ts, err := o.DecodeStringBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := structPointer_StringSlice(base, p.field)\n\t*v = append(*v, s)\n\treturn nil\n}\n\n// Decode a slice of slice of bytes ([][]byte).\nfunc (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {\n\tb, err := o.DecodeRawBytes(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := structPointer_BytesSlice(base, p.field)\n\t*v = append(*v, b)\n\treturn nil\n}\n\n// Decode a map field.\nfunc (o *Buffer) dec_new_map(p *Properties, base structPointer) error {\n\traw, err := o.DecodeRawBytes(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\toi := o.index       // index at the end of this map entry\n\to.index -= len(raw) // move buffer 
back to start of map entry\n\n\tmptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V\n\tif mptr.Elem().IsNil() {\n\t\tmptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))\n\t}\n\tv := mptr.Elem() // map[K]V\n\n\t// Prepare addressable doubly-indirect placeholders for the key and value types.\n\t// See enc_new_map for why.\n\tkeyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K\n\tkeybase := toStructPointer(keyptr.Addr())                  // **K\n\n\tvar valbase structPointer\n\tvar valptr reflect.Value\n\tswitch p.mtype.Elem().Kind() {\n\tcase reflect.Slice:\n\t\t// []byte\n\t\tvar dummy []byte\n\t\tvalptr = reflect.ValueOf(&dummy)  // *[]byte\n\t\tvalbase = toStructPointer(valptr) // *[]byte\n\tcase reflect.Ptr:\n\t\t// message; valptr is **Msg; need to allocate the intermediate pointer\n\t\tvalptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V\n\t\tvalptr.Set(reflect.New(valptr.Type().Elem()))\n\t\tvalbase = toStructPointer(valptr)\n\tdefault:\n\t\t// everything else\n\t\tvalptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V\n\t\tvalbase = toStructPointer(valptr.Addr())                   // **V\n\t}\n\n\t// Decode.\n\t// This parses a restricted wire format, namely the encoding of a message\n\t// with two fields. 
See enc_new_map for the format.\n\tfor o.index < oi {\n\t\t// tagcode for key and value properties are always a single byte\n\t\t// because they have tags 1 and 2.\n\t\ttagcode := o.buf[o.index]\n\t\to.index++\n\t\tswitch tagcode {\n\t\tcase p.mkeyprop.tagcode[0]:\n\t\t\tif err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase p.mvalprop.tagcode[0]:\n\t\t\tif err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\t// TODO: Should we silently skip this instead?\n\t\t\treturn fmt.Errorf(\"proto: bad map data tag %d\", raw[0])\n\t\t}\n\t}\n\tkeyelem, valelem := keyptr.Elem(), valptr.Elem()\n\tif !keyelem.IsValid() || !valelem.IsValid() {\n\t\t// We did not decode the key or the value in the map entry.\n\t\t// Either way, it's an invalid map entry.\n\t\treturn fmt.Errorf(\"proto: bad map data: missing key/val\")\n\t}\n\n\tv.SetMapIndex(keyelem, valelem)\n\treturn nil\n}\n\n// Decode a group.\nfunc (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {\n\tbas := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(bas) {\n\t\t// allocate new nested message\n\t\tbas = toStructPointer(reflect.New(p.stype))\n\t\tstructPointer_SetStructPointer(base, p.field, bas)\n\t}\n\treturn o.unmarshalType(p.stype, p.sprop, true, bas)\n}\n\n// Decode an embedded message.\nfunc (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {\n\traw, e := o.DecodeRawBytes(false)\n\tif e != nil {\n\t\treturn e\n\t}\n\n\tbas := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(bas) {\n\t\t// allocate new nested message\n\t\tbas = toStructPointer(reflect.New(p.stype))\n\t\tstructPointer_SetStructPointer(base, p.field, bas)\n\t}\n\n\t// If the object can unmarshal itself, let it.\n\tif p.isUnmarshaler {\n\t\tiv := structPointer_Interface(bas, p.stype)\n\t\treturn iv.(Unmarshaler).Unmarshal(raw)\n\t}\n\n\tobuf := o.buf\n\toi 
:= o.index\n\to.buf = raw\n\to.index = 0\n\n\terr = o.unmarshalType(p.stype, p.sprop, false, bas)\n\to.buf = obuf\n\to.index = oi\n\n\treturn err\n}\n\n// Decode a slice of embedded messages.\nfunc (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {\n\treturn o.dec_slice_struct(p, false, base)\n}\n\n// Decode a slice of embedded groups.\nfunc (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {\n\treturn o.dec_slice_struct(p, true, base)\n}\n\n// Decode a slice of structs ([]*struct).\nfunc (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {\n\tv := reflect.New(p.stype)\n\tbas := toStructPointer(v)\n\tstructPointer_StructPointerSlice(base, p.field).Append(bas)\n\n\tif is_group {\n\t\terr := o.unmarshalType(p.stype, p.sprop, is_group, bas)\n\t\treturn err\n\t}\n\n\traw, err := o.DecodeRawBytes(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If the object can unmarshal itself, let it.\n\tif p.isUnmarshaler {\n\t\tiv := v.Interface()\n\t\treturn iv.(Unmarshaler).Unmarshal(raw)\n\t}\n\n\tobuf := o.buf\n\toi := o.index\n\to.buf = raw\n\to.index = 0\n\n\terr = o.unmarshalType(p.stype, p.sprop, is_group, bas)\n\n\to.buf = obuf\n\to.index = oi\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/encode.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n/*\n * Routines for encoding data into the wire format for protocol buffers.\n */\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"sort\"\n)\n\n// RequiredNotSetError is the error returned if Marshal is called with\n// a protocol buffer struct whose required fields have not\n// all been initialized. It is also the error returned if Unmarshal is\n// called with an encoded protocol buffer that does not include all the\n// required fields.\n//\n// When printed, RequiredNotSetError reports the first unset required field in a\n// message. 
If the field cannot be precisely determined, it is reported as\n// \"{Unknown}\".\ntype RequiredNotSetError struct {\n\tfield string\n}\n\nfunc (e *RequiredNotSetError) Error() string {\n\treturn fmt.Sprintf(\"proto: required field %q not set\", e.field)\n}\n\nvar (\n\t// errRepeatedHasNil is the error returned if Marshal is called with\n\t// a struct with a repeated field containing a nil element.\n\terrRepeatedHasNil = errors.New(\"proto: repeated field has nil element\")\n\n\t// ErrNil is the error returned if Marshal is called with nil.\n\tErrNil = errors.New(\"proto: Marshal called with nil\")\n)\n\n// The fundamental encoders that put bytes on the wire.\n// Those that take integer types all accept uint64 and are\n// therefore of type valueEncoder.\n\nconst maxVarintBytes = 10 // maximum length of a varint\n\n// EncodeVarint returns the varint encoding of x.\n// This is the format for the\n// int32, int64, uint32, uint64, bool, and enum\n// protocol buffer types.\n// Not used by the package itself, but helpful to clients\n// wishing to use the same encoding.\nfunc EncodeVarint(x uint64) []byte {\n\tvar buf [maxVarintBytes]byte\n\tvar n int\n\tfor n = 0; x > 127; n++ {\n\t\tbuf[n] = 0x80 | uint8(x&0x7F)\n\t\tx >>= 7\n\t}\n\tbuf[n] = uint8(x)\n\tn++\n\treturn buf[0:n]\n}\n\n// EncodeVarint writes a varint-encoded integer to the Buffer.\n// This is the format for the\n// int32, int64, uint32, uint64, bool, and enum\n// protocol buffer types.\nfunc (p *Buffer) EncodeVarint(x uint64) error {\n\tfor x >= 1<<7 {\n\t\tp.buf = append(p.buf, uint8(x&0x7f|0x80))\n\t\tx >>= 7\n\t}\n\tp.buf = append(p.buf, uint8(x))\n\treturn nil\n}\n\nfunc sizeVarint(x uint64) (n int) {\n\tfor {\n\t\tn++\n\t\tx >>= 7\n\t\tif x == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn n\n}\n\n// EncodeFixed64 writes a 64-bit integer to the Buffer.\n// This is the format for the\n// fixed64, sfixed64, and double protocol buffer types.\nfunc (p *Buffer) EncodeFixed64(x uint64) error {\n\tp.buf = 
append(p.buf,\n\t\tuint8(x),\n\t\tuint8(x>>8),\n\t\tuint8(x>>16),\n\t\tuint8(x>>24),\n\t\tuint8(x>>32),\n\t\tuint8(x>>40),\n\t\tuint8(x>>48),\n\t\tuint8(x>>56))\n\treturn nil\n}\n\nfunc sizeFixed64(x uint64) int {\n\treturn 8\n}\n\n// EncodeFixed32 writes a 32-bit integer to the Buffer.\n// This is the format for the\n// fixed32, sfixed32, and float protocol buffer types.\nfunc (p *Buffer) EncodeFixed32(x uint64) error {\n\tp.buf = append(p.buf,\n\t\tuint8(x),\n\t\tuint8(x>>8),\n\t\tuint8(x>>16),\n\t\tuint8(x>>24))\n\treturn nil\n}\n\nfunc sizeFixed32(x uint64) int {\n\treturn 4\n}\n\n// EncodeZigzag64 writes a zigzag-encoded 64-bit integer\n// to the Buffer.\n// This is the format used for the sint64 protocol buffer type.\nfunc (p *Buffer) EncodeZigzag64(x uint64) error {\n\t// use signed number to get arithmetic right shift.\n\treturn p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))\n}\n\nfunc sizeZigzag64(x uint64) int {\n\treturn sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))\n}\n\n// EncodeZigzag32 writes a zigzag-encoded 32-bit integer\n// to the Buffer.\n// This is the format used for the sint32 protocol buffer type.\nfunc (p *Buffer) EncodeZigzag32(x uint64) error {\n\t// use signed number to get arithmetic right shift.\n\treturn p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))\n}\n\nfunc sizeZigzag32(x uint64) int {\n\treturn sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))\n}\n\n// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.\n// This is the format used for the bytes protocol buffer\n// type and for embedded messages.\nfunc (p *Buffer) EncodeRawBytes(b []byte) error {\n\tp.EncodeVarint(uint64(len(b)))\n\tp.buf = append(p.buf, b...)\n\treturn nil\n}\n\nfunc sizeRawBytes(b []byte) int {\n\treturn sizeVarint(uint64(len(b))) +\n\t\tlen(b)\n}\n\n// EncodeStringBytes writes an encoded string to the Buffer.\n// This is the format used for the proto2 string type.\nfunc (p *Buffer) 
EncodeStringBytes(s string) error {\n\tp.EncodeVarint(uint64(len(s)))\n\tp.buf = append(p.buf, s...)\n\treturn nil\n}\n\nfunc sizeStringBytes(s string) int {\n\treturn sizeVarint(uint64(len(s))) +\n\t\tlen(s)\n}\n\n// Marshaler is the interface representing objects that can marshal themselves.\ntype Marshaler interface {\n\tMarshal() ([]byte, error)\n}\n\n// Marshal takes the protocol buffer\n// and encodes it into the wire format, returning the data.\nfunc Marshal(pb Message) ([]byte, error) {\n\t// Can the object marshal itself?\n\tif m, ok := pb.(Marshaler); ok {\n\t\treturn m.Marshal()\n\t}\n\tp := NewBuffer(nil)\n\terr := p.Marshal(pb)\n\tvar state errorState\n\tif err != nil && !state.shouldContinue(err, nil) {\n\t\treturn nil, err\n\t}\n\tif p.buf == nil && err == nil {\n\t\t// Return a non-nil slice on success.\n\t\treturn []byte{}, nil\n\t}\n\treturn p.buf, err\n}\n\n// Marshal takes the protocol buffer\n// and encodes it into the wire format, writing the result to the\n// Buffer.\nfunc (p *Buffer) Marshal(pb Message) error {\n\t// Can the object marshal itself?\n\tif m, ok := pb.(Marshaler); ok {\n\t\tdata, err := m.Marshal()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.buf = append(p.buf, data...)\n\t\treturn nil\n\t}\n\n\tt, base, err := getbase(pb)\n\tif structPointer_IsNil(base) {\n\t\treturn ErrNil\n\t}\n\tif err == nil {\n\t\terr = p.enc_struct(GetProperties(t.Elem()), base)\n\t}\n\n\tif collectStats {\n\t\tstats.Encode++\n\t}\n\n\treturn err\n}\n\n// Size returns the encoded size of a protocol buffer.\nfunc Size(pb Message) (n int) {\n\t// Can the object marshal itself?  
If so, Size is slow.\n\t// TODO: add Size to Marshaler, or add a Sizer interface.\n\tif m, ok := pb.(Marshaler); ok {\n\t\tb, _ := m.Marshal()\n\t\treturn len(b)\n\t}\n\n\tt, base, err := getbase(pb)\n\tif structPointer_IsNil(base) {\n\t\treturn 0\n\t}\n\tif err == nil {\n\t\tn = size_struct(GetProperties(t.Elem()), base)\n\t}\n\n\tif collectStats {\n\t\tstats.Size++\n\t}\n\n\treturn\n}\n\n// Individual type encoders.\n\n// Encode a bool.\nfunc (o *Buffer) enc_bool(p *Properties, base structPointer) error {\n\tv := *structPointer_Bool(base, p.field)\n\tif v == nil {\n\t\treturn ErrNil\n\t}\n\tx := 0\n\tif *v {\n\t\tx = 1\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, uint64(x))\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {\n\tv := *structPointer_BoolVal(base, p.field)\n\tif !v {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, 1)\n\treturn nil\n}\n\nfunc size_bool(p *Properties, base structPointer) int {\n\tv := *structPointer_Bool(base, p.field)\n\tif v == nil {\n\t\treturn 0\n\t}\n\treturn len(p.tagcode) + 1 // each bool takes exactly one byte\n}\n\nfunc size_proto3_bool(p *Properties, base structPointer) int {\n\tv := *structPointer_BoolVal(base, p.field)\n\tif !v {\n\t\treturn 0\n\t}\n\treturn len(p.tagcode) + 1 // each bool takes exactly one byte\n}\n\n// Encode an int32.\nfunc (o *Buffer) enc_int32(p *Properties, base structPointer) error {\n\tv := structPointer_Word32(base, p.field)\n\tif word32_IsNil(v) {\n\t\treturn ErrNil\n\t}\n\tx := int32(word32_Get(v)) // permit sign extension to use full 64-bit range\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, uint64(x))\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {\n\tv := structPointer_Word32Val(base, p.field)\n\tx := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range\n\tif x == 0 {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, 
p.tagcode...)\n\tp.valEnc(o, uint64(x))\n\treturn nil\n}\n\nfunc size_int32(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word32(base, p.field)\n\tif word32_IsNil(v) {\n\t\treturn 0\n\t}\n\tx := int32(word32_Get(v)) // permit sign extension to use full 64-bit range\n\tn += len(p.tagcode)\n\tn += p.valSize(uint64(x))\n\treturn\n}\n\nfunc size_proto3_int32(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word32Val(base, p.field)\n\tx := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range\n\tif x == 0 {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += p.valSize(uint64(x))\n\treturn\n}\n\n// Encode a uint32.\n// Exactly the same as int32, except for no sign extension.\nfunc (o *Buffer) enc_uint32(p *Properties, base structPointer) error {\n\tv := structPointer_Word32(base, p.field)\n\tif word32_IsNil(v) {\n\t\treturn ErrNil\n\t}\n\tx := word32_Get(v)\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, uint64(x))\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {\n\tv := structPointer_Word32Val(base, p.field)\n\tx := word32Val_Get(v)\n\tif x == 0 {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, uint64(x))\n\treturn nil\n}\n\nfunc size_uint32(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word32(base, p.field)\n\tif word32_IsNil(v) {\n\t\treturn 0\n\t}\n\tx := word32_Get(v)\n\tn += len(p.tagcode)\n\tn += p.valSize(uint64(x))\n\treturn\n}\n\nfunc size_proto3_uint32(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word32Val(base, p.field)\n\tx := word32Val_Get(v)\n\tif x == 0 {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += p.valSize(uint64(x))\n\treturn\n}\n\n// Encode an int64.\nfunc (o *Buffer) enc_int64(p *Properties, base structPointer) error {\n\tv := structPointer_Word64(base, p.field)\n\tif word64_IsNil(v) {\n\t\treturn ErrNil\n\t}\n\tx := word64_Get(v)\n\to.buf = append(o.buf, 
p.tagcode...)\n\tp.valEnc(o, x)\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {\n\tv := structPointer_Word64Val(base, p.field)\n\tx := word64Val_Get(v)\n\tif x == 0 {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, x)\n\treturn nil\n}\n\nfunc size_int64(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word64(base, p.field)\n\tif word64_IsNil(v) {\n\t\treturn 0\n\t}\n\tx := word64_Get(v)\n\tn += len(p.tagcode)\n\tn += p.valSize(x)\n\treturn\n}\n\nfunc size_proto3_int64(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word64Val(base, p.field)\n\tx := word64Val_Get(v)\n\tif x == 0 {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += p.valSize(x)\n\treturn\n}\n\n// Encode a string.\nfunc (o *Buffer) enc_string(p *Properties, base structPointer) error {\n\tv := *structPointer_String(base, p.field)\n\tif v == nil {\n\t\treturn ErrNil\n\t}\n\tx := *v\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeStringBytes(x)\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {\n\tv := *structPointer_StringVal(base, p.field)\n\tif v == \"\" {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeStringBytes(v)\n\treturn nil\n}\n\nfunc size_string(p *Properties, base structPointer) (n int) {\n\tv := *structPointer_String(base, p.field)\n\tif v == nil {\n\t\treturn 0\n\t}\n\tx := *v\n\tn += len(p.tagcode)\n\tn += sizeStringBytes(x)\n\treturn\n}\n\nfunc size_proto3_string(p *Properties, base structPointer) (n int) {\n\tv := *structPointer_StringVal(base, p.field)\n\tif v == \"\" {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += sizeStringBytes(v)\n\treturn\n}\n\n// All protocol buffer fields are nillable, but be careful.\nfunc isNil(v reflect.Value) bool {\n\tswitch v.Kind() {\n\tcase reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:\n\t\treturn v.IsNil()\n\t}\n\treturn false\n}\n\n// Encode a 
message struct.\nfunc (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {\n\tvar state errorState\n\tstructp := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(structp) {\n\t\treturn ErrNil\n\t}\n\n\t// Can the object marshal itself?\n\tif p.isMarshaler {\n\t\tm := structPointer_Interface(structp, p.stype).(Marshaler)\n\t\tdata, err := m.Marshal()\n\t\tif err != nil && !state.shouldContinue(err, nil) {\n\t\t\treturn err\n\t\t}\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\to.EncodeRawBytes(data)\n\t\treturn nil\n\t}\n\n\to.buf = append(o.buf, p.tagcode...)\n\treturn o.enc_len_struct(p.sprop, structp, &state)\n}\n\nfunc size_struct_message(p *Properties, base structPointer) int {\n\tstructp := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(structp) {\n\t\treturn 0\n\t}\n\n\t// Can the object marshal itself?\n\tif p.isMarshaler {\n\t\tm := structPointer_Interface(structp, p.stype).(Marshaler)\n\t\tdata, _ := m.Marshal()\n\t\tn0 := len(p.tagcode)\n\t\tn1 := sizeRawBytes(data)\n\t\treturn n0 + n1\n\t}\n\n\tn0 := len(p.tagcode)\n\tn1 := size_struct(p.sprop, structp)\n\tn2 := sizeVarint(uint64(n1)) // size of encoded length\n\treturn n0 + n1 + n2\n}\n\n// Encode a group struct.\nfunc (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {\n\tvar state errorState\n\tb := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(b) {\n\t\treturn ErrNil\n\t}\n\n\to.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))\n\terr := o.enc_struct(p.sprop, b)\n\tif err != nil && !state.shouldContinue(err, nil) {\n\t\treturn err\n\t}\n\to.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))\n\treturn state.err\n}\n\nfunc size_struct_group(p *Properties, base structPointer) (n int) {\n\tb := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(b) {\n\t\treturn 0\n\t}\n\n\tn += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))\n\tn += size_struct(p.sprop, b)\n\tn += 
sizeVarint(uint64((p.Tag << 3) | WireEndGroup))\n\treturn\n}\n\n// Encode a slice of bools ([]bool).\nfunc (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {\n\ts := *structPointer_BoolSlice(base, p.field)\n\tl := len(s)\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\tfor _, x := range s {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\tv := uint64(0)\n\t\tif x {\n\t\t\tv = 1\n\t\t}\n\t\tp.valEnc(o, v)\n\t}\n\treturn nil\n}\n\nfunc size_slice_bool(p *Properties, base structPointer) int {\n\ts := *structPointer_BoolSlice(base, p.field)\n\tl := len(s)\n\tif l == 0 {\n\t\treturn 0\n\t}\n\treturn l * (len(p.tagcode) + 1) // each bool takes exactly one byte\n}\n\n// Encode a slice of bools ([]bool) in packed format.\nfunc (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {\n\ts := *structPointer_BoolSlice(base, p.field)\n\tl := len(s)\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeVarint(uint64(l)) // each bool takes exactly one byte\n\tfor _, x := range s {\n\t\tv := uint64(0)\n\t\tif x {\n\t\t\tv = 1\n\t\t}\n\t\tp.valEnc(o, v)\n\t}\n\treturn nil\n}\n\nfunc size_slice_packed_bool(p *Properties, base structPointer) (n int) {\n\ts := *structPointer_BoolSlice(base, p.field)\n\tl := len(s)\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += sizeVarint(uint64(l))\n\tn += l // each bool takes exactly one byte\n\treturn\n}\n\n// Encode a slice of bytes ([]byte).\nfunc (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {\n\ts := *structPointer_Bytes(base, p.field)\n\tif s == nil {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeRawBytes(s)\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {\n\ts := *structPointer_Bytes(base, p.field)\n\tif len(s) == 0 {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeRawBytes(s)\n\treturn nil\n}\n\nfunc size_slice_byte(p *Properties, 
base structPointer) (n int) {\n\ts := *structPointer_Bytes(base, p.field)\n\tif s == nil {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += sizeRawBytes(s)\n\treturn\n}\n\nfunc size_proto3_slice_byte(p *Properties, base structPointer) (n int) {\n\ts := *structPointer_Bytes(base, p.field)\n\tif len(s) == 0 {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += sizeRawBytes(s)\n\treturn\n}\n\n// Encode a slice of int32s ([]int32).\nfunc (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\tfor i := 0; i < l; i++ {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\tx := int32(s.Index(i)) // permit sign extension to use full 64-bit range\n\t\tp.valEnc(o, uint64(x))\n\t}\n\treturn nil\n}\n\nfunc size_slice_int32(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tfor i := 0; i < l; i++ {\n\t\tn += len(p.tagcode)\n\t\tx := int32(s.Index(i)) // permit sign extension to use full 64-bit range\n\t\tn += p.valSize(uint64(x))\n\t}\n\treturn\n}\n\n// Encode a slice of int32s ([]int32) in packed format.\nfunc (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\t// TODO: Reuse a Buffer.\n\tbuf := NewBuffer(nil)\n\tfor i := 0; i < l; i++ {\n\t\tx := int32(s.Index(i)) // permit sign extension to use full 64-bit range\n\t\tp.valEnc(buf, uint64(x))\n\t}\n\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeVarint(uint64(len(buf.buf)))\n\to.buf = append(o.buf, buf.buf...)\n\treturn nil\n}\n\nfunc size_slice_packed_int32(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tvar bufSize int\n\tfor i := 0; i < l; i++ {\n\t\tx := int32(s.Index(i)) // permit sign 
extension to use full 64-bit range\n\t\tbufSize += p.valSize(uint64(x))\n\t}\n\n\tn += len(p.tagcode)\n\tn += sizeVarint(uint64(bufSize))\n\tn += bufSize\n\treturn\n}\n\n// Encode a slice of uint32s ([]uint32).\n// Exactly the same as int32, except for no sign extension.\nfunc (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\tfor i := 0; i < l; i++ {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\tx := s.Index(i)\n\t\tp.valEnc(o, uint64(x))\n\t}\n\treturn nil\n}\n\nfunc size_slice_uint32(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tfor i := 0; i < l; i++ {\n\t\tn += len(p.tagcode)\n\t\tx := s.Index(i)\n\t\tn += p.valSize(uint64(x))\n\t}\n\treturn\n}\n\n// Encode a slice of uint32s ([]uint32) in packed format.\n// Exactly the same as int32, except for no sign extension.\nfunc (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\t// TODO: Reuse a Buffer.\n\tbuf := NewBuffer(nil)\n\tfor i := 0; i < l; i++ {\n\t\tp.valEnc(buf, uint64(s.Index(i)))\n\t}\n\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeVarint(uint64(len(buf.buf)))\n\to.buf = append(o.buf, buf.buf...)\n\treturn nil\n}\n\nfunc size_slice_packed_uint32(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tvar bufSize int\n\tfor i := 0; i < l; i++ {\n\t\tbufSize += p.valSize(uint64(s.Index(i)))\n\t}\n\n\tn += len(p.tagcode)\n\tn += sizeVarint(uint64(bufSize))\n\tn += bufSize\n\treturn\n}\n\n// Encode a slice of int64s ([]int64).\nfunc (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {\n\ts := structPointer_Word64Slice(base, p.field)\n\tl := 
s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\tfor i := 0; i < l; i++ {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\tp.valEnc(o, s.Index(i))\n\t}\n\treturn nil\n}\n\nfunc size_slice_int64(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word64Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tfor i := 0; i < l; i++ {\n\t\tn += len(p.tagcode)\n\t\tn += p.valSize(s.Index(i))\n\t}\n\treturn\n}\n\n// Encode a slice of int64s ([]int64) in packed format.\nfunc (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {\n\ts := structPointer_Word64Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\t// TODO: Reuse a Buffer.\n\tbuf := NewBuffer(nil)\n\tfor i := 0; i < l; i++ {\n\t\tp.valEnc(buf, s.Index(i))\n\t}\n\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeVarint(uint64(len(buf.buf)))\n\to.buf = append(o.buf, buf.buf...)\n\treturn nil\n}\n\nfunc size_slice_packed_int64(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word64Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tvar bufSize int\n\tfor i := 0; i < l; i++ {\n\t\tbufSize += p.valSize(s.Index(i))\n\t}\n\n\tn += len(p.tagcode)\n\tn += sizeVarint(uint64(bufSize))\n\tn += bufSize\n\treturn\n}\n\n// Encode a slice of slice of bytes ([][]byte).\nfunc (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {\n\tss := *structPointer_BytesSlice(base, p.field)\n\tl := len(ss)\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\tfor i := 0; i < l; i++ {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\to.EncodeRawBytes(ss[i])\n\t}\n\treturn nil\n}\n\nfunc size_slice_slice_byte(p *Properties, base structPointer) (n int) {\n\tss := *structPointer_BytesSlice(base, p.field)\n\tl := len(ss)\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tn += l * len(p.tagcode)\n\tfor i := 0; i < l; i++ {\n\t\tn += sizeRawBytes(ss[i])\n\t}\n\treturn\n}\n\n// Encode a slice of strings ([]string).\nfunc (o *Buffer) 
enc_slice_string(p *Properties, base structPointer) error {\n\tss := *structPointer_StringSlice(base, p.field)\n\tl := len(ss)\n\tfor i := 0; i < l; i++ {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\to.EncodeStringBytes(ss[i])\n\t}\n\treturn nil\n}\n\nfunc size_slice_string(p *Properties, base structPointer) (n int) {\n\tss := *structPointer_StringSlice(base, p.field)\n\tl := len(ss)\n\tn += l * len(p.tagcode)\n\tfor i := 0; i < l; i++ {\n\t\tn += sizeStringBytes(ss[i])\n\t}\n\treturn\n}\n\n// Encode a slice of message structs ([]*struct).\nfunc (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {\n\tvar state errorState\n\ts := structPointer_StructPointerSlice(base, p.field)\n\tl := s.Len()\n\n\tfor i := 0; i < l; i++ {\n\t\tstructp := s.Index(i)\n\t\tif structPointer_IsNil(structp) {\n\t\t\treturn errRepeatedHasNil\n\t\t}\n\n\t\t// Can the object marshal itself?\n\t\tif p.isMarshaler {\n\t\t\tm := structPointer_Interface(structp, p.stype).(Marshaler)\n\t\t\tdata, err := m.Marshal()\n\t\t\tif err != nil && !state.shouldContinue(err, nil) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\to.buf = append(o.buf, p.tagcode...)\n\t\t\to.EncodeRawBytes(data)\n\t\t\tcontinue\n\t\t}\n\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\terr := o.enc_len_struct(p.sprop, structp, &state)\n\t\tif err != nil && !state.shouldContinue(err, nil) {\n\t\t\tif err == ErrNil {\n\t\t\t\treturn errRepeatedHasNil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\treturn state.err\n}\n\nfunc size_slice_struct_message(p *Properties, base structPointer) (n int) {\n\ts := structPointer_StructPointerSlice(base, p.field)\n\tl := s.Len()\n\tn += l * len(p.tagcode)\n\tfor i := 0; i < l; i++ {\n\t\tstructp := s.Index(i)\n\t\tif structPointer_IsNil(structp) {\n\t\t\treturn // return the size up to this point\n\t\t}\n\n\t\t// Can the object marshal itself?\n\t\tif p.isMarshaler {\n\t\t\tm := structPointer_Interface(structp, p.stype).(Marshaler)\n\t\t\tdata, _ := m.Marshal()\n\t\t\tn += 
len(p.tagcode)\n\t\t\tn += sizeRawBytes(data)\n\t\t\tcontinue\n\t\t}\n\n\t\tn0 := size_struct(p.sprop, structp)\n\t\tn1 := sizeVarint(uint64(n0)) // size of encoded length\n\t\tn += n0 + n1\n\t}\n\treturn\n}\n\n// Encode a slice of group structs ([]*struct).\nfunc (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {\n\tvar state errorState\n\ts := structPointer_StructPointerSlice(base, p.field)\n\tl := s.Len()\n\n\tfor i := 0; i < l; i++ {\n\t\tb := s.Index(i)\n\t\tif structPointer_IsNil(b) {\n\t\t\treturn errRepeatedHasNil\n\t\t}\n\n\t\to.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))\n\n\t\terr := o.enc_struct(p.sprop, b)\n\n\t\tif err != nil && !state.shouldContinue(err, nil) {\n\t\t\tif err == ErrNil {\n\t\t\t\treturn errRepeatedHasNil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\to.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))\n\t}\n\treturn state.err\n}\n\nfunc size_slice_struct_group(p *Properties, base structPointer) (n int) {\n\ts := structPointer_StructPointerSlice(base, p.field)\n\tl := s.Len()\n\n\tn += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))\n\tn += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))\n\tfor i := 0; i < l; i++ {\n\t\tb := s.Index(i)\n\t\tif structPointer_IsNil(b) {\n\t\t\treturn // return size up to this point\n\t\t}\n\n\t\tn += size_struct(p.sprop, b)\n\t}\n\treturn\n}\n\n// Encode an extension map.\nfunc (o *Buffer) enc_map(p *Properties, base structPointer) error {\n\tv := *structPointer_ExtMap(base, p.field)\n\tif err := encodeExtensionMap(v); err != nil {\n\t\treturn err\n\t}\n\t// Fast-path for common cases: zero or one extensions.\n\tif len(v) <= 1 {\n\t\tfor _, e := range v {\n\t\t\to.buf = append(o.buf, e.enc...)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Sort keys to provide a deterministic encoding.\n\tkeys := make([]int, 0, len(v))\n\tfor k := range v {\n\t\tkeys = append(keys, int(k))\n\t}\n\tsort.Ints(keys)\n\n\tfor _, k := range keys {\n\t\to.buf = append(o.buf, 
v[int32(k)].enc...)\n\t}\n\treturn nil\n}\n\nfunc size_map(p *Properties, base structPointer) int {\n\tv := *structPointer_ExtMap(base, p.field)\n\treturn sizeExtensionMap(v)\n}\n\n// Encode a map field.\nfunc (o *Buffer) enc_new_map(p *Properties, base structPointer) error {\n\tvar state errorState // XXX: or do we need to plumb this through?\n\n\t/*\n\t\tA map defined as\n\t\t\tmap<key_type, value_type> map_field = N;\n\t\tis encoded in the same way as\n\t\t\tmessage MapFieldEntry {\n\t\t\t\tkey_type key = 1;\n\t\t\t\tvalue_type value = 2;\n\t\t\t}\n\t\t\trepeated MapFieldEntry map_field = N;\n\t*/\n\n\tv := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V\n\tif v.Len() == 0 {\n\t\treturn nil\n\t}\n\n\tkeycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)\n\n\tenc := func() error {\n\t\tif err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tkeys := v.MapKeys()\n\tsort.Sort(mapKeys(keys))\n\tfor _, key := range keys {\n\t\tval := v.MapIndex(key)\n\n\t\tkeycopy.Set(key)\n\t\tvalcopy.Set(val)\n\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\tif err := o.enc_len_thing(enc, &state); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc size_new_map(p *Properties, base structPointer) int {\n\tv := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V\n\n\tkeycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)\n\n\tn := 0\n\tfor _, key := range v.MapKeys() {\n\t\tval := v.MapIndex(key)\n\t\tkeycopy.Set(key)\n\t\tvalcopy.Set(val)\n\n\t\t// Tag codes for key and val are the responsibility of the sub-sizer.\n\t\tkeysize := p.mkeyprop.size(p.mkeyprop, keybase)\n\t\tvalsize := p.mvalprop.size(p.mvalprop, valbase)\n\t\tentry := keysize + valsize\n\t\t// Add on tag code and length of map entry itself.\n\t\tn += len(p.tagcode) + sizeVarint(uint64(entry)) + entry\n\t}\n\treturn 
n\n}\n\n// mapEncodeScratch returns a new reflect.Value matching the map's value type,\n// and a structPointer suitable for passing to an encoder or sizer.\nfunc mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {\n\t// Prepare addressable doubly-indirect placeholders for the key and value types.\n\t// This is needed because the element-type encoders expect **T, but the map iteration produces T.\n\n\tkeycopy = reflect.New(mapType.Key()).Elem()                 // addressable K\n\tkeyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K\n\tkeyptr.Set(keycopy.Addr())                                  //\n\tkeybase = toStructPointer(keyptr.Addr())                    // **K\n\n\t// Value types are more varied and require special handling.\n\tswitch mapType.Elem().Kind() {\n\tcase reflect.Slice:\n\t\t// []byte\n\t\tvar dummy []byte\n\t\tvalcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte\n\t\tvalbase = toStructPointer(valcopy.Addr())\n\tcase reflect.Ptr:\n\t\t// message; the generated field type is map[K]*Msg (so V is *Msg),\n\t\t// so we only need one level of indirection.\n\t\tvalcopy = reflect.New(mapType.Elem()).Elem() // addressable V\n\t\tvalbase = toStructPointer(valcopy.Addr())\n\tdefault:\n\t\t// everything else\n\t\tvalcopy = reflect.New(mapType.Elem()).Elem()                // addressable V\n\t\tvalptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V\n\t\tvalptr.Set(valcopy.Addr())                                  //\n\t\tvalbase = toStructPointer(valptr.Addr())                    // **V\n\t}\n\treturn\n}\n\n// Encode a struct.\nfunc (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {\n\tvar state errorState\n\t// Encode fields in tag order so that decoders may use optimizations\n\t// that depend on the ordering.\n\t// https://developers.google.com/protocol-buffers/docs/encoding#order\n\tfor _, i := range prop.order {\n\t\tp := 
prop.Prop[i]\n\t\tif p.enc != nil {\n\t\t\terr := p.enc(o, p, base)\n\t\t\tif err != nil {\n\t\t\t\tif err == ErrNil {\n\t\t\t\t\tif p.Required && state.err == nil {\n\t\t\t\t\t\tstate.err = &RequiredNotSetError{p.Name}\n\t\t\t\t\t}\n\t\t\t\t} else if err == errRepeatedHasNil {\n\t\t\t\t\t// Give more context to nil values in repeated fields.\n\t\t\t\t\treturn errors.New(\"repeated field \" + p.OrigName + \" has nil element\")\n\t\t\t\t} else if !state.shouldContinue(err, p) {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Add unrecognized fields at the end.\n\tif prop.unrecField.IsValid() {\n\t\tv := *structPointer_Bytes(base, prop.unrecField)\n\t\tif len(v) > 0 {\n\t\t\to.buf = append(o.buf, v...)\n\t\t}\n\t}\n\n\treturn state.err\n}\n\nfunc size_struct(prop *StructProperties, base structPointer) (n int) {\n\tfor _, i := range prop.order {\n\t\tp := prop.Prop[i]\n\t\tif p.size != nil {\n\t\t\tn += p.size(p, base)\n\t\t}\n\t}\n\n\t// Add unrecognized fields at the end.\n\tif prop.unrecField.IsValid() {\n\t\tv := *structPointer_Bytes(base, prop.unrecField)\n\t\tn += len(v)\n\t}\n\n\treturn\n}\n\nvar zeroes [20]byte // longer than any conceivable sizeVarint\n\n// Encode a struct, preceded by its encoded length (as a varint).\nfunc (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {\n\treturn o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)\n}\n\n// Encode something, preceded by its encoded length (as a varint).\nfunc (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {\n\tiLen := len(o.buf)\n\to.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length\n\tiMsg := len(o.buf)\n\terr := enc()\n\tif err != nil && !state.shouldContinue(err, nil) {\n\t\treturn err\n\t}\n\tlMsg := len(o.buf) - iMsg\n\tlLen := sizeVarint(uint64(lMsg))\n\tswitch x := lLen - (iMsg - iLen); {\n\tcase x > 0: // actual length is x bytes larger than the space we reserved\n\t\t// Move msg 
x bytes right.\n\t\to.buf = append(o.buf, zeroes[:x]...)\n\t\tcopy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])\n\tcase x < 0: // actual length is x bytes smaller than the space we reserved\n\t\t// Move msg x bytes left.\n\t\tcopy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])\n\t\to.buf = o.buf[:len(o.buf)+x] // x is negative\n\t}\n\t// Encode the length in the reserved space.\n\to.buf = o.buf[:iLen]\n\to.EncodeVarint(uint64(lMsg))\n\to.buf = o.buf[:len(o.buf)+lMsg]\n\treturn state.err\n}\n\n// errorState maintains the first error that occurs and updates that error\n// with additional context.\ntype errorState struct {\n\terr error\n}\n\n// shouldContinue reports whether encoding should continue upon encountering the\n// given error. If the error is RequiredNotSetError, shouldContinue returns true\n// and, if this is the first appearance of that error, remembers it for future\n// reporting.\n//\n// If prop is not nil, it may update any error with additional context about the\n// field with the error.\nfunc (s *errorState) shouldContinue(err error, prop *Properties) bool {\n\t// Ignore unset required fields.\n\treqNotSet, ok := err.(*RequiredNotSetError)\n\tif !ok {\n\t\treturn false\n\t}\n\tif s.err == nil {\n\t\tif prop != nil {\n\t\t\terr = &RequiredNotSetError{prop.Name + \".\" + reqNotSet.field}\n\t\t}\n\t\ts.err = err\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/equal.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2011 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Protocol buffer comparison.\n// TODO: MessageSet.\n\npackage proto\n\nimport (\n\t\"bytes\"\n\t\"log\"\n\t\"reflect\"\n\t\"strings\"\n)\n\n/*\nEqual returns true iff protocol buffers a and b are equal.\nThe arguments must both be pointers to protocol buffer structs.\n\nEquality is defined in this way:\n  - Two messages are equal iff they are the same type,\n    corresponding fields are equal, unknown field sets\n    are equal, and extensions sets are equal.\n  - Two set scalar fields are equal iff their values are equal.\n    If the fields are of a floating-point type, remember that\n    NaN != x for all x, including NaN.\n  - Two repeated fields are equal iff their lengths are the same,\n    and their corresponding elements are equal (a \"bytes\" field,\n    although represented by []byte, is not a repeated field)\n  - Two unset fields are equal.\n  - Two unknown field sets are equal if their current\n    encoded state is equal.\n  - Two extension sets are equal iff they have corresponding\n    elements that are pairwise equal.\n  - Every other combination of things are not equal.\n\nThe return value is undefined if a and b are not protocol buffers.\n*/\nfunc Equal(a, b Message) bool {\n\tif a == nil || b == nil {\n\t\treturn a == b\n\t}\n\tv1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)\n\tif v1.Type() != v2.Type() {\n\t\treturn false\n\t}\n\tif v1.Kind() == reflect.Ptr {\n\t\tif v1.IsNil() {\n\t\t\treturn 
v2.IsNil()\n\t\t}\n\t\tif v2.IsNil() {\n\t\t\treturn false\n\t\t}\n\t\tv1, v2 = v1.Elem(), v2.Elem()\n\t}\n\tif v1.Kind() != reflect.Struct {\n\t\treturn false\n\t}\n\treturn equalStruct(v1, v2)\n}\n\n// v1 and v2 are known to have the same type.\nfunc equalStruct(v1, v2 reflect.Value) bool {\n\tfor i := 0; i < v1.NumField(); i++ {\n\t\tf := v1.Type().Field(i)\n\t\tif strings.HasPrefix(f.Name, \"XXX_\") {\n\t\t\tcontinue\n\t\t}\n\t\tf1, f2 := v1.Field(i), v2.Field(i)\n\t\tif f.Type.Kind() == reflect.Ptr {\n\t\t\tif n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {\n\t\t\t\t// both unset\n\t\t\t\tcontinue\n\t\t\t} else if n1 != n2 {\n\t\t\t\t// set/unset mismatch\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tb1, ok := f1.Interface().(raw)\n\t\t\tif ok {\n\t\t\t\tb2 := f2.Interface().(raw)\n\t\t\t\t// RawMessage\n\t\t\t\tif !bytes.Equal(b1.Bytes(), b2.Bytes()) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tf1, f2 = f1.Elem(), f2.Elem()\n\t\t}\n\t\tif !equalAny(f1, f2) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif em1 := v1.FieldByName(\"XXX_extensions\"); em1.IsValid() {\n\t\tem2 := v2.FieldByName(\"XXX_extensions\")\n\t\tif !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tuf := v1.FieldByName(\"XXX_unrecognized\")\n\tif !uf.IsValid() {\n\t\treturn true\n\t}\n\n\tu1 := uf.Bytes()\n\tu2 := v2.FieldByName(\"XXX_unrecognized\").Bytes()\n\tif !bytes.Equal(u1, u2) {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n// v1 and v2 are known to have the same type.\nfunc equalAny(v1, v2 reflect.Value) bool {\n\tif v1.Type() == protoMessageType {\n\t\tm1, _ := v1.Interface().(Message)\n\t\tm2, _ := v2.Interface().(Message)\n\t\treturn Equal(m1, m2)\n\t}\n\tswitch v1.Kind() {\n\tcase reflect.Bool:\n\t\treturn v1.Bool() == v2.Bool()\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn v1.Float() == v2.Float()\n\tcase reflect.Int32, reflect.Int64:\n\t\treturn v1.Int() == 
v2.Int()\n\tcase reflect.Map:\n\t\tif v1.Len() != v2.Len() {\n\t\t\treturn false\n\t\t}\n\t\tfor _, key := range v1.MapKeys() {\n\t\t\tval2 := v2.MapIndex(key)\n\t\t\tif !val2.IsValid() {\n\t\t\t\t// This key was not found in the second map.\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif !equalAny(v1.MapIndex(key), val2) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase reflect.Ptr:\n\t\treturn equalAny(v1.Elem(), v2.Elem())\n\tcase reflect.Slice:\n\t\tif v1.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\t// short circuit: []byte\n\t\t\tif v1.IsNil() != v2.IsNil() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))\n\t\t}\n\n\t\tif v1.Len() != v2.Len() {\n\t\t\treturn false\n\t\t}\n\t\tfor i := 0; i < v1.Len(); i++ {\n\t\t\tif !equalAny(v1.Index(i), v2.Index(i)) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase reflect.String:\n\t\treturn v1.Interface().(string) == v2.Interface().(string)\n\tcase reflect.Struct:\n\t\treturn equalStruct(v1, v2)\n\tcase reflect.Uint32, reflect.Uint64:\n\t\treturn v1.Uint() == v2.Uint()\n\t}\n\n\t// unknown type, so not a protocol buffer\n\tlog.Printf(\"proto: don't know how to compare %v\", v1)\n\treturn false\n}\n\n// base is the struct type that the extensions are based on.\n// em1 and em2 are extension maps.\nfunc equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {\n\tif len(em1) != len(em2) {\n\t\treturn false\n\t}\n\n\tfor extNum, e1 := range em1 {\n\t\te2, ok := em2[extNum]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\tm1, m2 := e1.value, e2.value\n\n\t\tif m1 != nil && m2 != nil {\n\t\t\t// Both are unencoded.\n\t\t\tif !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// At least one is encoded. 
To do a semantically correct comparison\n\t\t// we need to unmarshal them first.\n\t\tvar desc *ExtensionDesc\n\t\tif m := extensionMaps[base]; m != nil {\n\t\t\tdesc = m[extNum]\n\t\t}\n\t\tif desc == nil {\n\t\t\tlog.Printf(\"proto: don't know how to compare extension %d of %v\", extNum, base)\n\t\t\tcontinue\n\t\t}\n\t\tvar err error\n\t\tif m1 == nil {\n\t\t\tm1, err = decodeExtension(e1.enc, desc)\n\t\t}\n\t\tif m2 == nil && err == nil {\n\t\t\tm2, err = decodeExtension(e2.enc, desc)\n\t\t}\n\t\tif err != nil {\n\t\t\t// The encoded form is invalid.\n\t\t\tlog.Printf(\"proto: badly encoded extension %d of %v: %v\", extNum, base, err)\n\t\t\treturn false\n\t\t}\n\t\tif !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/equal_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2011 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"testing\"\n\n\t. 
\"github.com/golang/protobuf/proto\"\n\tpb \"github.com/golang/protobuf/proto/testdata\"\n)\n\n// Four identical base messages.\n// The init function adds extensions to some of them.\nvar messageWithoutExtension = &pb.MyMessage{Count: Int32(7)}\nvar messageWithExtension1a = &pb.MyMessage{Count: Int32(7)}\nvar messageWithExtension1b = &pb.MyMessage{Count: Int32(7)}\nvar messageWithExtension2 = &pb.MyMessage{Count: Int32(7)}\n\n// Two messages with non-message extensions.\nvar messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)}\nvar messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)}\n\nfunc init() {\n\text1 := &pb.Ext{Data: String(\"Kirk\")}\n\text2 := &pb.Ext{Data: String(\"Picard\")}\n\n\t// messageWithExtension1a has ext1, but never marshals it.\n\tif err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil {\n\t\tpanic(\"SetExtension on 1a failed: \" + err.Error())\n\t}\n\n\t// messageWithExtension1b is the unmarshaled form of messageWithExtension1a.\n\tif err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil {\n\t\tpanic(\"SetExtension on 1b failed: \" + err.Error())\n\t}\n\tbuf, err := Marshal(messageWithExtension1b)\n\tif err != nil {\n\t\tpanic(\"Marshal of 1b failed: \" + err.Error())\n\t}\n\tmessageWithExtension1b.Reset()\n\tif err := Unmarshal(buf, messageWithExtension1b); err != nil {\n\t\tpanic(\"Unmarshal of 1b failed: \" + err.Error())\n\t}\n\n\t// messageWithExtension2 has ext2.\n\tif err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil {\n\t\tpanic(\"SetExtension on 2 failed: \" + err.Error())\n\t}\n\n\tif err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil {\n\t\tpanic(\"SetExtension on Int32-1 failed: \" + err.Error())\n\t}\n\tif err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil {\n\t\tpanic(\"SetExtension on Int32-2 failed: \" + err.Error())\n\t}\n}\n\nvar EqualTests = []struct {\n\tdesc 
string\n\ta, b Message\n\texp  bool\n}{\n\t{\"different types\", &pb.GoEnum{}, &pb.GoTestField{}, false},\n\t{\"equal empty\", &pb.GoEnum{}, &pb.GoEnum{}, true},\n\t{\"nil vs nil\", nil, nil, true},\n\t{\"typed nil vs typed nil\", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true},\n\t{\"typed nil vs empty\", (*pb.GoEnum)(nil), &pb.GoEnum{}, false},\n\t{\"different typed nil\", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false},\n\n\t{\"one set field, one unset field\", &pb.GoTestField{Label: String(\"foo\")}, &pb.GoTestField{}, false},\n\t{\"one set field zero, one unset field\", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false},\n\t{\"different set fields\", &pb.GoTestField{Label: String(\"foo\")}, &pb.GoTestField{Label: String(\"bar\")}, false},\n\t{\"equal set\", &pb.GoTestField{Label: String(\"foo\")}, &pb.GoTestField{Label: String(\"foo\")}, true},\n\n\t{\"repeated, one set\", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false},\n\t{\"repeated, different length\", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false},\n\t{\"repeated, different value\", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false},\n\t{\"repeated, equal\", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true},\n\t{\"repeated, nil equal nil\", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true},\n\t{\"repeated, nil equal empty\", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true},\n\t{\"repeated, empty equal nil\", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true},\n\n\t{\n\t\t\"nested, different\",\n\t\t&pb.GoTest{RequiredField: &pb.GoTestField{Label: String(\"foo\")}},\n\t\t&pb.GoTest{RequiredField: &pb.GoTestField{Label: String(\"bar\")}},\n\t\tfalse,\n\t},\n\t{\n\t\t\"nested, equal\",\n\t\t&pb.GoTest{RequiredField: &pb.GoTestField{Label: String(\"wow\")}},\n\t\t&pb.GoTest{RequiredField: 
&pb.GoTestField{Label: String(\"wow\")}},\n\t\ttrue,\n\t},\n\n\t{\"bytes\", &pb.OtherMessage{Value: []byte(\"foo\")}, &pb.OtherMessage{Value: []byte(\"foo\")}, true},\n\t{\"bytes, empty\", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true},\n\t{\"bytes, empty vs nil\", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false},\n\t{\n\t\t\"repeated bytes\",\n\t\t&pb.MyMessage{RepBytes: [][]byte{[]byte(\"sham\"), []byte(\"wow\")}},\n\t\t&pb.MyMessage{RepBytes: [][]byte{[]byte(\"sham\"), []byte(\"wow\")}},\n\t\ttrue,\n\t},\n\n\t{\"extension vs. no extension\", messageWithoutExtension, messageWithExtension1a, false},\n\t{\"extension vs. same extension\", messageWithExtension1a, messageWithExtension1b, true},\n\t{\"extension vs. different extension\", messageWithExtension1a, messageWithExtension2, false},\n\n\t{\"int32 extension vs. itself\", messageWithInt32Extension1, messageWithInt32Extension1, true},\n\t{\"int32 extension vs. a different int32\", messageWithInt32Extension1, messageWithInt32Extension2, false},\n\n\t{\n\t\t\"message with group\",\n\t\t&pb.MyMessage{\n\t\t\tCount: Int32(1),\n\t\t\tSomegroup: &pb.MyMessage_SomeGroup{\n\t\t\t\tGroupField: Int32(5),\n\t\t\t},\n\t\t},\n\t\t&pb.MyMessage{\n\t\t\tCount: Int32(1),\n\t\t\tSomegroup: &pb.MyMessage_SomeGroup{\n\t\t\t\tGroupField: Int32(5),\n\t\t\t},\n\t\t},\n\t\ttrue,\n\t},\n\n\t{\n\t\t\"map same\",\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{1: \"Ken\"}},\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{1: \"Ken\"}},\n\t\ttrue,\n\t},\n\t{\n\t\t\"map different entry\",\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{1: \"Ken\"}},\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{2: \"Rob\"}},\n\t\tfalse,\n\t},\n\t{\n\t\t\"map different key only\",\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{1: \"Ken\"}},\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{2: \"Ken\"}},\n\t\tfalse,\n\t},\n\t{\n\t\t\"map different value 
only\",\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{1: \"Ken\"}},\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{1: \"Rob\"}},\n\t\tfalse,\n\t},\n}\n\nfunc TestEqual(t *testing.T) {\n\tfor _, tc := range EqualTests {\n\t\tif res := Equal(tc.a, tc.b); res != tc.exp {\n\t\t\tt.Errorf(\"%v: Equal(%v, %v) = %v, want %v\", tc.desc, tc.a, tc.b, res, tc.exp)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/extensions.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n/*\n * Types and routines for supporting protocol buffer extensions.\n */\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"sync\"\n)\n\n// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.\nvar ErrMissingExtension = errors.New(\"proto: missing extension\")\n\n// ExtensionRange represents a range of message extensions for a protocol buffer.\n// Used in code generated by the protocol compiler.\ntype ExtensionRange struct {\n\tStart, End int32 // both inclusive\n}\n\n// extendableProto is an interface implemented by any protocol buffer that may be extended.\ntype extendableProto interface {\n\tMessage\n\tExtensionRangeArray() []ExtensionRange\n\tExtensionMap() map[int32]Extension\n}\n\nvar extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()\n\n// ExtensionDesc represents an extension specification.\n// Used in generated code from the protocol compiler.\ntype ExtensionDesc struct {\n\tExtendedType  Message     // nil pointer to the type that is being extended\n\tExtensionType interface{} // nil pointer to the extension type\n\tField         int32       // field number\n\tName          string      // fully-qualified name of extension, for text formatting\n\tTag           string      // protobuf tag style\n}\n\nfunc (ed *ExtensionDesc) repeated() bool {\n\tt := reflect.TypeOf(ed.ExtensionType)\n\treturn t.Kind() == 
reflect.Slice && t.Elem().Kind() != reflect.Uint8\n}\n\n// Extension represents an extension in a message.\ntype Extension struct {\n\t// When an extension is stored in a message using SetExtension\n\t// only desc and value are set. When the message is marshaled\n\t// enc will be set to the encoded form of the message.\n\t//\n\t// When a message is unmarshaled and contains extensions, each\n\t// extension will have only enc set. When such an extension is\n\t// accessed using GetExtension (or GetExtensions) desc and value\n\t// will be set.\n\tdesc  *ExtensionDesc\n\tvalue interface{}\n\tenc   []byte\n}\n\n// SetRawExtension is for testing only.\nfunc SetRawExtension(base extendableProto, id int32, b []byte) {\n\tbase.ExtensionMap()[id] = Extension{enc: b}\n}\n\n// isExtensionField returns true iff the given field number is in an extension range.\nfunc isExtensionField(pb extendableProto, field int32) bool {\n\tfor _, er := range pb.ExtensionRangeArray() {\n\t\tif er.Start <= field && field <= er.End {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// checkExtensionTypes checks that the given extension is valid for pb.\nfunc checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {\n\t// Check the extended type.\n\tif a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {\n\t\treturn errors.New(\"proto: bad extended type; \" + b.String() + \" does not extend \" + a.String())\n\t}\n\t// Check the range.\n\tif !isExtensionField(pb, extension.Field) {\n\t\treturn errors.New(\"proto: bad extension number; not in declared ranges\")\n\t}\n\treturn nil\n}\n\n// extPropKey is sufficient to uniquely identify an extension.\ntype extPropKey struct {\n\tbase  reflect.Type\n\tfield int32\n}\n\nvar extProp = struct {\n\tsync.RWMutex\n\tm map[extPropKey]*Properties\n}{\n\tm: make(map[extPropKey]*Properties),\n}\n\nfunc extensionProperties(ed *ExtensionDesc) *Properties {\n\tkey := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: 
ed.Field}\n\n\textProp.RLock()\n\tif prop, ok := extProp.m[key]; ok {\n\t\textProp.RUnlock()\n\t\treturn prop\n\t}\n\textProp.RUnlock()\n\n\textProp.Lock()\n\tdefer extProp.Unlock()\n\t// Check again.\n\tif prop, ok := extProp.m[key]; ok {\n\t\treturn prop\n\t}\n\n\tprop := new(Properties)\n\tprop.Init(reflect.TypeOf(ed.ExtensionType), \"unknown_name\", ed.Tag, nil)\n\textProp.m[key] = prop\n\treturn prop\n}\n\n// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.\nfunc encodeExtensionMap(m map[int32]Extension) error {\n\tfor k, e := range m {\n\t\tif e.value == nil || e.desc == nil {\n\t\t\t// Extension is only in its encoded form.\n\t\t\tcontinue\n\t\t}\n\n\t\t// We don't skip extensions that have an encoded form set,\n\t\t// because the extension value may have been mutated after\n\t\t// the last time this function was called.\n\n\t\tet := reflect.TypeOf(e.desc.ExtensionType)\n\t\tprops := extensionProperties(e.desc)\n\n\t\tp := NewBuffer(nil)\n\t\t// If e.value has type T, the encoder expects a *struct{ X T }.\n\t\t// Pass a *T with a zero field and hope it all works out.\n\t\tx := reflect.New(et)\n\t\tx.Elem().Set(reflect.ValueOf(e.value))\n\t\tif err := props.enc(p, props, toStructPointer(x)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\te.enc = p.buf\n\t\tm[k] = e\n\t}\n\treturn nil\n}\n\nfunc sizeExtensionMap(m map[int32]Extension) (n int) {\n\tfor _, e := range m {\n\t\tif e.value == nil || e.desc == nil {\n\t\t\t// Extension is only in its encoded form.\n\t\t\tn += len(e.enc)\n\t\t\tcontinue\n\t\t}\n\n\t\t// We don't skip extensions that have an encoded form set,\n\t\t// because the extension value may have been mutated after\n\t\t// the last time this function was called.\n\n\t\tet := reflect.TypeOf(e.desc.ExtensionType)\n\t\tprops := extensionProperties(e.desc)\n\n\t\t// If e.value has type T, the encoder expects a *struct{ X T }.\n\t\t// Pass a *T with a zero field and hope it all works out.\n\t\tx := 
reflect.New(et)\n\t\tx.Elem().Set(reflect.ValueOf(e.value))\n\t\tn += props.size(props, toStructPointer(x))\n\t}\n\treturn\n}\n\n// HasExtension returns whether the given extension is present in pb.\nfunc HasExtension(pb extendableProto, extension *ExtensionDesc) bool {\n\t// TODO: Check types, field numbers, etc.?\n\t_, ok := pb.ExtensionMap()[extension.Field]\n\treturn ok\n}\n\n// ClearExtension removes the given extension from pb.\nfunc ClearExtension(pb extendableProto, extension *ExtensionDesc) {\n\t// TODO: Check types, field numbers, etc.?\n\tdelete(pb.ExtensionMap(), extension.Field)\n}\n\n// GetExtension parses and returns the given extension of pb.\n// If the extension is not present it returns ErrMissingExtension.\nfunc GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {\n\tif err := checkExtensionTypes(pb, extension); err != nil {\n\t\treturn nil, err\n\t}\n\n\temap := pb.ExtensionMap()\n\te, ok := emap[extension.Field]\n\tif !ok {\n\t\treturn nil, ErrMissingExtension\n\t}\n\tif e.value != nil {\n\t\t// Already decoded. Check the descriptor, though.\n\t\tif e.desc != extension {\n\t\t\t// This shouldn't happen. 
If it does, it means that\n\t\t\t// GetExtension was called twice with two different\n\t\t\t// descriptors with the same field number.\n\t\t\treturn nil, errors.New(\"proto: descriptor conflict\")\n\t\t}\n\t\treturn e.value, nil\n\t}\n\n\tv, err := decodeExtension(e.enc, extension)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Remember the decoded version and drop the encoded version.\n\t// That way it is safe to mutate what we return.\n\te.value = v\n\te.desc = extension\n\te.enc = nil\n\temap[extension.Field] = e\n\treturn e.value, nil\n}\n\n// decodeExtension decodes an extension encoded in b.\nfunc decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {\n\to := NewBuffer(b)\n\n\tt := reflect.TypeOf(extension.ExtensionType)\n\trep := extension.repeated()\n\n\tprops := extensionProperties(extension)\n\n\t// t is a pointer to a struct, pointer to basic type or a slice.\n\t// Allocate a \"field\" to store the pointer/slice itself; the\n\t// pointer/slice will be stored here. We pass\n\t// the address of this field to props.dec.\n\t// This passes a zero field and a *t and lets props.dec\n\t// interpret it as a *struct{ x t }.\n\tvalue := reflect.New(t).Elem()\n\n\tfor {\n\t\t// Discard wire type and field number varint. 
It isn't needed.\n\t\tif _, err := o.DecodeVarint(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif !rep || o.index >= len(o.buf) {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn value.Interface(), nil\n}\n\n// GetExtensions returns a slice of the extensions present in pb that are also listed in es.\n// The returned slice has the same length as es; missing extensions will appear as nil elements.\nfunc GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {\n\tepb, ok := pb.(extendableProto)\n\tif !ok {\n\t\terr = errors.New(\"proto: not an extendable proto\")\n\t\treturn\n\t}\n\textensions = make([]interface{}, len(es))\n\tfor i, e := range es {\n\t\textensions[i], err = GetExtension(epb, e)\n\t\tif err == ErrMissingExtension {\n\t\t\terr = nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n// SetExtension sets the specified extension of pb to the specified value.\nfunc SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {\n\tif err := checkExtensionTypes(pb, extension); err != nil {\n\t\treturn err\n\t}\n\ttyp := reflect.TypeOf(extension.ExtensionType)\n\tif typ != reflect.TypeOf(value) {\n\t\treturn errors.New(\"proto: bad extension value type\")\n\t}\n\t// nil extension values need to be caught early, because the\n\t// encoder can't distinguish an ErrNil due to a nil extension\n\t// from an ErrNil due to a missing field. 
Extensions are\n\t// always optional, so the encoder would just swallow the error\n\t// and drop all the extensions from the encoded message.\n\tif reflect.ValueOf(value).IsNil() {\n\t\treturn fmt.Errorf(\"proto: SetExtension called with nil value of type %T\", value)\n\t}\n\n\tpb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}\n\treturn nil\n}\n\n// A global registry of extensions.\n// The generated code will register the generated descriptors by calling RegisterExtension.\n\nvar extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)\n\n// RegisterExtension is called from the generated code.\nfunc RegisterExtension(desc *ExtensionDesc) {\n\tst := reflect.TypeOf(desc.ExtendedType).Elem()\n\tm := extensionMaps[st]\n\tif m == nil {\n\t\tm = make(map[int32]*ExtensionDesc)\n\t\textensionMaps[st] = m\n\t}\n\tif _, ok := m[desc.Field]; ok {\n\t\tpanic(\"proto: duplicate extension registered: \" + st.String() + \" \" + strconv.Itoa(int(desc.Field)))\n\t}\n\tm[desc.Field] = desc\n}\n\n// RegisteredExtensions returns a map of the registered extensions of a\n// protocol buffer struct, indexed by the extension number.\n// The argument pb should be a nil pointer to the struct type.\nfunc RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {\n\treturn extensionMaps[reflect.TypeOf(pb).Elem()]\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/extensions_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2014 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\tpb \"github.com/golang/protobuf/proto/testdata\"\n)\n\nfunc TestGetExtensionsWithMissingExtensions(t *testing.T) {\n\tmsg := &pb.MyMessage{}\n\text1 := &pb.Ext{}\n\tif err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {\n\t\tt.Fatalf(\"Could not set ext1: %s\", ext1)\n\t}\n\texts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{\n\t\tpb.E_Ext_More,\n\t\tpb.E_Ext_Text,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"GetExtensions() failed: %s\", err)\n\t}\n\tif exts[0] != ext1 {\n\t\tt.Errorf(\"ext1 not in returned extensions: %T %v\", exts[0], exts[0])\n\t}\n\tif exts[1] != nil {\n\t\tt.Errorf(\"ext2 in returned extensions: %T %v\", exts[1], exts[1])\n\t}\n}\n\nfunc TestGetExtensionStability(t *testing.T) {\n\tcheck := func(m *pb.MyMessage) bool {\n\t\text1, err := proto.GetExtension(m, pb.E_Ext_More)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"GetExtension() failed: %s\", err)\n\t\t}\n\t\text2, err := proto.GetExtension(m, pb.E_Ext_More)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"GetExtension() failed: %s\", err)\n\t\t}\n\t\treturn ext1 == ext2\n\t}\n\tmsg := &pb.MyMessage{Count: proto.Int32(4)}\n\text0 := &pb.Ext{}\n\tif err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil {\n\t\tt.Fatalf(\"Could not set ext1: %s\", ext0)\n\t}\n\tif !check(msg) {\n\t\tt.Errorf(\"GetExtension() not stable before marshaling\")\n\t}\n\tbb, err 
:= proto.Marshal(msg)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal() failed: %s\", err)\n\t}\n\tmsg1 := &pb.MyMessage{}\n\terr = proto.Unmarshal(bb, msg1)\n\tif err != nil {\n\t\tt.Fatalf(\"Unmarshal() failed: %s\", err)\n\t}\n\tif !check(msg1) {\n\t\tt.Errorf(\"GetExtension() not stable after unmarshaling\")\n\t}\n}\n\nfunc TestExtensionsRoundTrip(t *testing.T) {\n\tmsg := &pb.MyMessage{}\n\text1 := &pb.Ext{\n\t\tData: proto.String(\"hi\"),\n\t}\n\text2 := &pb.Ext{\n\t\tData: proto.String(\"there\"),\n\t}\n\texists := proto.HasExtension(msg, pb.E_Ext_More)\n\tif exists {\n\t\tt.Error(\"Extension More present unexpectedly\")\n\t}\n\tif err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {\n\t\tt.Error(err)\n\t}\n\tif err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil {\n\t\tt.Error(err)\n\t}\n\te, err := proto.GetExtension(msg, pb.E_Ext_More)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tx, ok := e.(*pb.Ext)\n\tif !ok {\n\t\tt.Errorf(\"e has type %T, expected testdata.Ext\", e)\n\t} else if *x.Data != \"there\" {\n\t\tt.Errorf(\"SetExtension failed to overwrite, got %+v, not 'there'\", x)\n\t}\n\tproto.ClearExtension(msg, pb.E_Ext_More)\n\tif _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension {\n\t\tt.Errorf(\"got %v, expected ErrMissingExtension\", e)\n\t}\n\tif _, err := proto.GetExtension(msg, pb.E_X215); err == nil {\n\t\tt.Error(\"expected bad extension error, got nil\")\n\t}\n\tif err := proto.SetExtension(msg, pb.E_X215, 12); err == nil {\n\t\tt.Error(\"expected extension err\")\n\t}\n\tif err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil {\n\t\tt.Error(\"expected some sort of type mismatch error, got nil\")\n\t}\n}\n\nfunc TestNilExtension(t *testing.T) {\n\tmsg := &pb.MyMessage{\n\t\tCount: proto.Int32(1),\n\t}\n\tif err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String(\"hello\")); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); 
err == nil {\n\t\tt.Error(\"expected SetExtension to fail due to a nil extension\")\n\t} else if want := \"proto: SetExtension called with nil value of type *testdata.Ext\"; err.Error() != want {\n\t\tt.Errorf(\"expected error %v, got %v\", want, err)\n\t}\n\t// Note: if the behavior of Marshal is ever changed to ignore nil extensions, update\n\t// this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal.\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/lib.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n/*\nPackage proto converts data structures to and from the wire format of\nprotocol buffers.  
It works in concert with the Go source code generated\nfor .proto files by the protocol compiler.\n\nA summary of the properties of the protocol buffer interface\nfor a protocol buffer variable v:\n\n  - Names are turned from camel_case to CamelCase for export.\n  - There are no methods on v to set fields; just treat\n\tthem as structure fields.\n  - There are getters that return a field's value if set,\n\tand return the field's default value if unset.\n\tThe getters work even if the receiver is a nil message.\n  - The zero value for a struct is its correct initialization state.\n\tAll desired fields must be set before marshaling.\n  - A Reset() method will restore a protobuf struct to its zero state.\n  - Non-repeated fields are pointers to the values; nil means unset.\n\tThat is, optional or required field int32 f becomes F *int32.\n  - Repeated fields are slices.\n  - Helper functions are available to aid the setting of fields.\n\tmsg.Foo = proto.String(\"hello\") // set field\n  - Constants are defined to hold the default values of all fields that\n\thave them.  They have the form Default_StructName_FieldName.\n\tBecause the getter methods handle defaulted values,\n\tdirect use of these constants should be rare.\n  - Enums are given type names and maps from names to values.\n\tEnum values are prefixed by the enclosing message's name, or by the\n\tenum's type name if it is a top-level enum. Enum types have a String\n\tmethod, and a Enum method to assist in message construction.\n  - Nested messages, groups and enums have type names prefixed with the name of\n\tthe surrounding message type.\n  - Extensions are given descriptor names that start with E_,\n\tfollowed by an underscore-delimited list of the nested messages\n\tthat contain it (if any) followed by the CamelCased name of the\n\textension field itself.  
HasExtension, ClearExtension, GetExtension\n\tand SetExtension are functions for manipulating extensions.\n  - Marshal and Unmarshal are functions to encode and decode the wire format.\n\nThe simplest way to describe this is to see an example.\nGiven file test.proto, containing\n\n\tpackage example;\n\n\tenum FOO { X = 17; }\n\n\tmessage Test {\n\t  required string label = 1;\n\t  optional int32 type = 2 [default=77];\n\t  repeated int64 reps = 3;\n\t  optional group OptionalGroup = 4 {\n\t    required string RequiredField = 5;\n\t  }\n\t}\n\nThe resulting file, test.pb.go, is:\n\n\tpackage example\n\n\timport proto \"github.com/golang/protobuf/proto\"\n\timport math \"math\"\n\n\ttype FOO int32\n\tconst (\n\t\tFOO_X FOO = 17\n\t)\n\tvar FOO_name = map[int32]string{\n\t\t17: \"X\",\n\t}\n\tvar FOO_value = map[string]int32{\n\t\t\"X\": 17,\n\t}\n\n\tfunc (x FOO) Enum() *FOO {\n\t\tp := new(FOO)\n\t\t*p = x\n\t\treturn p\n\t}\n\tfunc (x FOO) String() string {\n\t\treturn proto.EnumName(FOO_name, int32(x))\n\t}\n\tfunc (x *FOO) UnmarshalJSON(data []byte) error {\n\t\tvalue, err := proto.UnmarshalJSONEnum(FOO_value, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*x = FOO(value)\n\t\treturn nil\n\t}\n\n\ttype Test struct {\n\t\tLabel            *string             `protobuf:\"bytes,1,req,name=label\" json:\"label,omitempty\"`\n\t\tType             *int32              `protobuf:\"varint,2,opt,name=type,def=77\" json:\"type,omitempty\"`\n\t\tReps             []int64             `protobuf:\"varint,3,rep,name=reps\" json:\"reps,omitempty\"`\n\t\tOptionalgroup    *Test_OptionalGroup `protobuf:\"group,4,opt,name=OptionalGroup\" json:\"optionalgroup,omitempty\"`\n\t\tXXX_unrecognized []byte              `json:\"-\"`\n\t}\n\tfunc (m *Test) Reset()         { *m = Test{} }\n\tfunc (m *Test) String() string { return proto.CompactTextString(m) }\n\tfunc (*Test) ProtoMessage()    {}\n\tconst Default_Test_Type int32 = 77\n\n\tfunc (m *Test) GetLabel() string {\n\t\tif m != 
nil && m.Label != nil {\n\t\t\treturn *m.Label\n\t\t}\n\t\treturn \"\"\n\t}\n\n\tfunc (m *Test) GetType() int32 {\n\t\tif m != nil && m.Type != nil {\n\t\t\treturn *m.Type\n\t\t}\n\t\treturn Default_Test_Type\n\t}\n\n\tfunc (m *Test) GetOptionalgroup() *Test_OptionalGroup {\n\t\tif m != nil {\n\t\t\treturn m.Optionalgroup\n\t\t}\n\t\treturn nil\n\t}\n\n\ttype Test_OptionalGroup struct {\n\t\tRequiredField *string `protobuf:\"bytes,5,req\" json:\"RequiredField,omitempty\"`\n\t}\n\tfunc (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }\n\tfunc (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }\n\n\tfunc (m *Test_OptionalGroup) GetRequiredField() string {\n\t\tif m != nil && m.RequiredField != nil {\n\t\t\treturn *m.RequiredField\n\t\t}\n\t\treturn \"\"\n\t}\n\n\tfunc init() {\n\t\tproto.RegisterEnum(\"example.FOO\", FOO_name, FOO_value)\n\t}\n\nTo create and play with a Test object:\n\npackage main\n\n\timport (\n\t\t\"log\"\n\n\t\t\"github.com/golang/protobuf/proto\"\n\t\tpb \"./example.pb\"\n\t)\n\n\tfunc main() {\n\t\ttest := &pb.Test{\n\t\t\tLabel: proto.String(\"hello\"),\n\t\t\tType:  proto.Int32(17),\n\t\t\tOptionalgroup: &pb.Test_OptionalGroup{\n\t\t\t\tRequiredField: proto.String(\"good bye\"),\n\t\t\t},\n\t\t}\n\t\tdata, err := proto.Marshal(test)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"marshaling error: \", err)\n\t\t}\n\t\tnewTest := &pb.Test{}\n\t\terr = proto.Unmarshal(data, newTest)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"unmarshaling error: \", err)\n\t\t}\n\t\t// Now test and newTest contain the same data.\n\t\tif test.GetLabel() != newTest.GetLabel() {\n\t\t\tlog.Fatalf(\"data mismatch %q != %q\", test.GetLabel(), newTest.GetLabel())\n\t\t}\n\t\t// etc.\n\t}\n*/\npackage proto\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"sync\"\n)\n\n// Message is implemented by generated protocol buffer messages.\ntype Message interface {\n\tReset()\n\tString() 
string\n\tProtoMessage()\n}\n\n// Stats records allocation details about the protocol buffer encoders\n// and decoders.  Useful for tuning the library itself.\ntype Stats struct {\n\tEmalloc uint64 // mallocs in encode\n\tDmalloc uint64 // mallocs in decode\n\tEncode  uint64 // number of encodes\n\tDecode  uint64 // number of decodes\n\tChit    uint64 // number of cache hits\n\tCmiss   uint64 // number of cache misses\n\tSize    uint64 // number of sizes\n}\n\n// Set to true to enable stats collection.\nconst collectStats = false\n\nvar stats Stats\n\n// GetStats returns a copy of the global Stats structure.\nfunc GetStats() Stats { return stats }\n\n// A Buffer is a buffer manager for marshaling and unmarshaling\n// protocol buffers.  It may be reused between invocations to\n// reduce memory usage.  It is not necessary to use a Buffer;\n// the global functions Marshal and Unmarshal create a\n// temporary Buffer and are fine for most applications.\ntype Buffer struct {\n\tbuf   []byte // encode/decode byte stream\n\tindex int    // write point\n\n\t// pools of basic types to amortize allocation.\n\tbools   []bool\n\tuint32s []uint32\n\tuint64s []uint64\n\n\t// extra pools, only used with pointer_reflect.go\n\tint32s   []int32\n\tint64s   []int64\n\tfloat32s []float32\n\tfloat64s []float64\n}\n\n// NewBuffer allocates a new Buffer and initializes its internal data to\n// the contents of the argument slice.\nfunc NewBuffer(e []byte) *Buffer {\n\treturn &Buffer{buf: e}\n}\n\n// Reset resets the Buffer, ready for marshaling a new protocol buffer.\nfunc (p *Buffer) Reset() {\n\tp.buf = p.buf[0:0] // for reading/writing\n\tp.index = 0        // for reading\n}\n\n// SetBuf replaces the internal buffer with the slice,\n// ready for unmarshaling the contents of the slice.\nfunc (p *Buffer) SetBuf(s []byte) {\n\tp.buf = s\n\tp.index = 0\n}\n\n// Bytes returns the contents of the Buffer.\nfunc (p *Buffer) Bytes() []byte { return p.buf }\n\n/*\n * Helper routines for 
simplifying the creation of optional fields of basic type.\n */\n\n// Bool is a helper routine that allocates a new bool value\n// to store v and returns a pointer to it.\nfunc Bool(v bool) *bool {\n\treturn &v\n}\n\n// Int32 is a helper routine that allocates a new int32 value\n// to store v and returns a pointer to it.\nfunc Int32(v int32) *int32 {\n\treturn &v\n}\n\n// Int is a helper routine that allocates a new int32 value\n// to store v and returns a pointer to it, but unlike Int32\n// its argument value is an int.\nfunc Int(v int) *int32 {\n\tp := new(int32)\n\t*p = int32(v)\n\treturn p\n}\n\n// Int64 is a helper routine that allocates a new int64 value\n// to store v and returns a pointer to it.\nfunc Int64(v int64) *int64 {\n\treturn &v\n}\n\n// Float32 is a helper routine that allocates a new float32 value\n// to store v and returns a pointer to it.\nfunc Float32(v float32) *float32 {\n\treturn &v\n}\n\n// Float64 is a helper routine that allocates a new float64 value\n// to store v and returns a pointer to it.\nfunc Float64(v float64) *float64 {\n\treturn &v\n}\n\n// Uint32 is a helper routine that allocates a new uint32 value\n// to store v and returns a pointer to it.\nfunc Uint32(v uint32) *uint32 {\n\treturn &v\n}\n\n// Uint64 is a helper routine that allocates a new uint64 value\n// to store v and returns a pointer to it.\nfunc Uint64(v uint64) *uint64 {\n\treturn &v\n}\n\n// String is a helper routine that allocates a new string value\n// to store v and returns a pointer to it.\nfunc String(v string) *string {\n\treturn &v\n}\n\n// EnumName is a helper function to simplify printing protocol buffer enums\n// by name.  Given an enum map and a value, it returns a useful string.\nfunc EnumName(m map[int32]string, v int32) string {\n\ts, ok := m[v]\n\tif ok {\n\t\treturn s\n\t}\n\treturn strconv.Itoa(int(v))\n}\n\n// UnmarshalJSONEnum is a helper function to simplify recovering enum int values\n// from their JSON-encoded representation. 
Given a map from the enum's symbolic\n// names to its int values, and a byte buffer containing the JSON-encoded\n// value, it returns an int32 that can be cast to the enum type by the caller.\n//\n// The function can deal with both JSON representations, numeric and symbolic.\nfunc UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {\n\tif data[0] == '\"' {\n\t\t// New style: enums are strings.\n\t\tvar repr string\n\t\tif err := json.Unmarshal(data, &repr); err != nil {\n\t\t\treturn -1, err\n\t\t}\n\t\tval, ok := m[repr]\n\t\tif !ok {\n\t\t\treturn 0, fmt.Errorf(\"unrecognized enum %s value %q\", enumName, repr)\n\t\t}\n\t\treturn val, nil\n\t}\n\t// Old style: enums are ints.\n\tvar val int32\n\tif err := json.Unmarshal(data, &val); err != nil {\n\t\treturn 0, fmt.Errorf(\"cannot unmarshal %#q into enum %s\", data, enumName)\n\t}\n\treturn val, nil\n}\n\n// DebugPrint dumps the encoded data in b in a debugging format with a header\n// including the string s. 
Used in testing but made available for general debugging.\nfunc (p *Buffer) DebugPrint(s string, b []byte) {\n\tvar u uint64\n\n\tobuf := p.buf\n\tindex := p.index\n\tp.buf = b\n\tp.index = 0\n\tdepth := 0\n\n\tfmt.Printf(\"\\n--- %s ---\\n\", s)\n\nout:\n\tfor {\n\t\tfor i := 0; i < depth; i++ {\n\t\t\tfmt.Print(\"  \")\n\t\t}\n\n\t\tindex := p.index\n\t\tif index == len(p.buf) {\n\t\t\tbreak\n\t\t}\n\n\t\top, err := p.DecodeVarint()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%3d: fetching op err %v\\n\", index, err)\n\t\t\tbreak out\n\t\t}\n\t\ttag := op >> 3\n\t\twire := op & 7\n\n\t\tswitch wire {\n\t\tdefault:\n\t\t\tfmt.Printf(\"%3d: t=%3d unknown wire=%d\\n\",\n\t\t\t\tindex, tag, wire)\n\t\t\tbreak out\n\n\t\tcase WireBytes:\n\t\t\tvar r []byte\n\n\t\t\tr, err = p.DecodeRawBytes(false)\n\t\t\tif err != nil {\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d bytes [%d]\", index, tag, len(r))\n\t\t\tif len(r) <= 6 {\n\t\t\t\tfor i := 0; i < len(r); i++ {\n\t\t\t\t\tfmt.Printf(\" %.2x\", r[i])\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor i := 0; i < 3; i++ {\n\t\t\t\t\tfmt.Printf(\" %.2x\", r[i])\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\" ..\")\n\t\t\t\tfor i := len(r) - 3; i < len(r); i++ {\n\t\t\t\t\tfmt.Printf(\" %.2x\", r[i])\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"\\n\")\n\n\t\tcase WireFixed32:\n\t\t\tu, err = p.DecodeFixed32()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"%3d: t=%3d fix32 err %v\\n\", index, tag, err)\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d fix32 %d\\n\", index, tag, u)\n\n\t\tcase WireFixed64:\n\t\t\tu, err = p.DecodeFixed64()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"%3d: t=%3d fix64 err %v\\n\", index, tag, err)\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d fix64 %d\\n\", index, tag, u)\n\t\t\tbreak\n\n\t\tcase WireVarint:\n\t\t\tu, err = p.DecodeVarint()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"%3d: t=%3d varint err %v\\n\", index, tag, err)\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d 
varint %d\\n\", index, tag, u)\n\n\t\tcase WireStartGroup:\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"%3d: t=%3d start err %v\\n\", index, tag, err)\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d start\\n\", index, tag)\n\t\t\tdepth++\n\n\t\tcase WireEndGroup:\n\t\t\tdepth--\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"%3d: t=%3d end err %v\\n\", index, tag, err)\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d end\\n\", index, tag)\n\t\t}\n\t}\n\n\tif depth != 0 {\n\t\tfmt.Printf(\"%3d: start-end not balanced %d\\n\", p.index, depth)\n\t}\n\tfmt.Printf(\"\\n\")\n\n\tp.buf = obuf\n\tp.index = index\n}\n\n// SetDefaults sets unset protocol buffer fields to their default values.\n// It only modifies fields that are both unset and have defined defaults.\n// It recursively sets default values in any non-nil sub-messages.\nfunc SetDefaults(pb Message) {\n\tsetDefaults(reflect.ValueOf(pb), true, false)\n}\n\n// v is a pointer to a struct.\nfunc setDefaults(v reflect.Value, recur, zeros bool) {\n\tv = v.Elem()\n\n\tdefaultMu.RLock()\n\tdm, ok := defaults[v.Type()]\n\tdefaultMu.RUnlock()\n\tif !ok {\n\t\tdm = buildDefaultMessage(v.Type())\n\t\tdefaultMu.Lock()\n\t\tdefaults[v.Type()] = dm\n\t\tdefaultMu.Unlock()\n\t}\n\n\tfor _, sf := range dm.scalars {\n\t\tf := v.Field(sf.index)\n\t\tif !f.IsNil() {\n\t\t\t// field already set\n\t\t\tcontinue\n\t\t}\n\t\tdv := sf.value\n\t\tif dv == nil && !zeros {\n\t\t\t// no explicit default, and don't want to set zeros\n\t\t\tcontinue\n\t\t}\n\t\tfptr := f.Addr().Interface() // **T\n\t\t// TODO: Consider batching the allocations we do here.\n\t\tswitch sf.kind {\n\t\tcase reflect.Bool:\n\t\t\tb := new(bool)\n\t\t\tif dv != nil {\n\t\t\t\t*b = dv.(bool)\n\t\t\t}\n\t\t\t*(fptr.(**bool)) = b\n\t\tcase reflect.Float32:\n\t\t\tf := new(float32)\n\t\t\tif dv != nil {\n\t\t\t\t*f = dv.(float32)\n\t\t\t}\n\t\t\t*(fptr.(**float32)) = f\n\t\tcase reflect.Float64:\n\t\t\tf := new(float64)\n\t\t\tif dv != nil 
{\n\t\t\t\t*f = dv.(float64)\n\t\t\t}\n\t\t\t*(fptr.(**float64)) = f\n\t\tcase reflect.Int32:\n\t\t\t// might be an enum\n\t\t\tif ft := f.Type(); ft != int32PtrType {\n\t\t\t\t// enum\n\t\t\t\tf.Set(reflect.New(ft.Elem()))\n\t\t\t\tif dv != nil {\n\t\t\t\t\tf.Elem().SetInt(int64(dv.(int32)))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// int32 field\n\t\t\t\ti := new(int32)\n\t\t\t\tif dv != nil {\n\t\t\t\t\t*i = dv.(int32)\n\t\t\t\t}\n\t\t\t\t*(fptr.(**int32)) = i\n\t\t\t}\n\t\tcase reflect.Int64:\n\t\t\ti := new(int64)\n\t\t\tif dv != nil {\n\t\t\t\t*i = dv.(int64)\n\t\t\t}\n\t\t\t*(fptr.(**int64)) = i\n\t\tcase reflect.String:\n\t\t\ts := new(string)\n\t\t\tif dv != nil {\n\t\t\t\t*s = dv.(string)\n\t\t\t}\n\t\t\t*(fptr.(**string)) = s\n\t\tcase reflect.Uint8:\n\t\t\t// exceptional case: []byte\n\t\t\tvar b []byte\n\t\t\tif dv != nil {\n\t\t\t\tdb := dv.([]byte)\n\t\t\t\tb = make([]byte, len(db))\n\t\t\t\tcopy(b, db)\n\t\t\t} else {\n\t\t\t\tb = []byte{}\n\t\t\t}\n\t\t\t*(fptr.(*[]byte)) = b\n\t\tcase reflect.Uint32:\n\t\t\tu := new(uint32)\n\t\t\tif dv != nil {\n\t\t\t\t*u = dv.(uint32)\n\t\t\t}\n\t\t\t*(fptr.(**uint32)) = u\n\t\tcase reflect.Uint64:\n\t\t\tu := new(uint64)\n\t\t\tif dv != nil {\n\t\t\t\t*u = dv.(uint64)\n\t\t\t}\n\t\t\t*(fptr.(**uint64)) = u\n\t\tdefault:\n\t\t\tlog.Printf(\"proto: can't set default for field %v (sf.kind=%v)\", f, sf.kind)\n\t\t}\n\t}\n\n\tfor _, ni := range dm.nested {\n\t\tf := v.Field(ni)\n\t\t// f is *T or []*T or map[T]*T\n\t\tswitch f.Kind() {\n\t\tcase reflect.Ptr:\n\t\t\tif f.IsNil() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsetDefaults(f, recur, zeros)\n\n\t\tcase reflect.Slice:\n\t\t\tfor i := 0; i < f.Len(); i++ {\n\t\t\t\te := f.Index(i)\n\t\t\t\tif e.IsNil() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tsetDefaults(e, recur, zeros)\n\t\t\t}\n\n\t\tcase reflect.Map:\n\t\t\tfor _, k := range f.MapKeys() {\n\t\t\t\te := f.MapIndex(k)\n\t\t\t\tif e.IsNil() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tsetDefaults(e, recur, 
zeros)\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar (\n\t// defaults maps a protocol buffer struct type to a slice of the fields,\n\t// with its scalar fields set to their proto-declared non-zero default values.\n\tdefaultMu sync.RWMutex\n\tdefaults  = make(map[reflect.Type]defaultMessage)\n\n\tint32PtrType = reflect.TypeOf((*int32)(nil))\n)\n\n// defaultMessage represents information about the default values of a message.\ntype defaultMessage struct {\n\tscalars []scalarField\n\tnested  []int // struct field index of nested messages\n}\n\ntype scalarField struct {\n\tindex int          // struct field index\n\tkind  reflect.Kind // element type (the T in *T or []T)\n\tvalue interface{}  // the proto-declared default value, or nil\n}\n\n// t is a struct type.\nfunc buildDefaultMessage(t reflect.Type) (dm defaultMessage) {\n\tsprop := GetProperties(t)\n\tfor _, prop := range sprop.Prop {\n\t\tfi, ok := sprop.decoderTags.get(prop.Tag)\n\t\tif !ok {\n\t\t\t// XXX_unrecognized\n\t\t\tcontinue\n\t\t}\n\t\tft := t.Field(fi).Type\n\n\t\tvar canHaveDefault, nestedMessage bool\n\t\tswitch ft.Kind() {\n\t\tcase reflect.Ptr:\n\t\t\tif ft.Elem().Kind() == reflect.Struct {\n\t\t\t\tnestedMessage = true\n\t\t\t} else {\n\t\t\t\tcanHaveDefault = true // proto2 scalar field\n\t\t\t}\n\n\t\tcase reflect.Slice:\n\t\t\tswitch ft.Elem().Kind() {\n\t\t\tcase reflect.Ptr:\n\t\t\t\tnestedMessage = true // repeated message\n\t\t\tcase reflect.Uint8:\n\t\t\t\tcanHaveDefault = true // bytes field\n\t\t\t}\n\n\t\tcase reflect.Map:\n\t\t\tif ft.Elem().Kind() == reflect.Ptr {\n\t\t\t\tnestedMessage = true // map with message values\n\t\t\t}\n\t\t}\n\n\t\tif !canHaveDefault {\n\t\t\tif nestedMessage {\n\t\t\t\tdm.nested = append(dm.nested, fi)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tsf := scalarField{\n\t\t\tindex: fi,\n\t\t\tkind:  ft.Elem().Kind(),\n\t\t}\n\n\t\t// scalar fields without defaults\n\t\tif !prop.HasDefault {\n\t\t\tdm.scalars = append(dm.scalars, sf)\n\t\t\tcontinue\n\t\t}\n\n\t\t// a scalar 
field: either *T or []byte\n\t\tswitch ft.Elem().Kind() {\n\t\tcase reflect.Bool:\n\t\t\tx, err := strconv.ParseBool(prop.Default)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"proto: bad default bool %q: %v\", prop.Default, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsf.value = x\n\t\tcase reflect.Float32:\n\t\t\tx, err := strconv.ParseFloat(prop.Default, 32)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"proto: bad default float32 %q: %v\", prop.Default, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsf.value = float32(x)\n\t\tcase reflect.Float64:\n\t\t\tx, err := strconv.ParseFloat(prop.Default, 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"proto: bad default float64 %q: %v\", prop.Default, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsf.value = x\n\t\tcase reflect.Int32:\n\t\t\tx, err := strconv.ParseInt(prop.Default, 10, 32)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"proto: bad default int32 %q: %v\", prop.Default, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsf.value = int32(x)\n\t\tcase reflect.Int64:\n\t\t\tx, err := strconv.ParseInt(prop.Default, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"proto: bad default int64 %q: %v\", prop.Default, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsf.value = x\n\t\tcase reflect.String:\n\t\t\tsf.value = prop.Default\n\t\tcase reflect.Uint8:\n\t\t\t// []byte (not *uint8)\n\t\t\tsf.value = []byte(prop.Default)\n\t\tcase reflect.Uint32:\n\t\t\tx, err := strconv.ParseUint(prop.Default, 10, 32)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"proto: bad default uint32 %q: %v\", prop.Default, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsf.value = uint32(x)\n\t\tcase reflect.Uint64:\n\t\t\tx, err := strconv.ParseUint(prop.Default, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"proto: bad default uint64 %q: %v\", prop.Default, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsf.value = x\n\t\tdefault:\n\t\t\tlog.Printf(\"proto: unhandled def kind %v\", ft.Elem().Kind())\n\t\t\tcontinue\n\t\t}\n\n\t\tdm.scalars = append(dm.scalars, sf)\n\t}\n\n\treturn 
dm\n}\n\n// Map fields may have key types of non-float scalars, strings and enums.\n// The easiest way to sort them in some deterministic order is to use fmt.\n// If this turns out to be inefficient we can always consider other options,\n// such as doing a Schwartzian transform.\n\ntype mapKeys []reflect.Value\n\nfunc (s mapKeys) Len() int      { return len(s) }\nfunc (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\nfunc (s mapKeys) Less(i, j int) bool {\n\treturn fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/message_set.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n/*\n * Support for message sets.\n */\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"sort\"\n)\n\n// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID.\n// A message type ID is required for storing a protocol buffer in a message set.\nvar ErrNoMessageTypeId = errors.New(\"proto does not have a message type ID\")\n\n// The first two types (_MessageSet_Item and MessageSet)\n// model what the protocol compiler produces for the following protocol message:\n//   message MessageSet {\n//     repeated group Item = 1 {\n//       required int32 type_id = 2;\n//       required string message = 3;\n//     };\n//   }\n// That is the MessageSet wire format. 
We can't use a proto to generate these\n// because that would introduce a circular dependency between it and this package.\n//\n// When a proto1 proto has a field that looks like:\n//   optional message<MessageSet> info = 3;\n// the protocol compiler produces a field in the generated struct that looks like:\n//   Info *_proto_.MessageSet  `protobuf:\"bytes,3,opt,name=info\"`\n// The package is automatically inserted so there is no need for that proto file to\n// import this package.\n\ntype _MessageSet_Item struct {\n\tTypeId  *int32 `protobuf:\"varint,2,req,name=type_id\"`\n\tMessage []byte `protobuf:\"bytes,3,req,name=message\"`\n}\n\ntype MessageSet struct {\n\tItem             []*_MessageSet_Item `protobuf:\"group,1,rep\"`\n\tXXX_unrecognized []byte\n\t// TODO: caching?\n}\n\n// Make sure MessageSet is a Message.\nvar _ Message = (*MessageSet)(nil)\n\n// messageTypeIder is an interface satisfied by a protocol buffer type\n// that may be stored in a MessageSet.\ntype messageTypeIder interface {\n\tMessageTypeId() int32\n}\n\nfunc (ms *MessageSet) find(pb Message) *_MessageSet_Item {\n\tmti, ok := pb.(messageTypeIder)\n\tif !ok {\n\t\treturn nil\n\t}\n\tid := mti.MessageTypeId()\n\tfor _, item := range ms.Item {\n\t\tif *item.TypeId == id {\n\t\t\treturn item\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (ms *MessageSet) Has(pb Message) bool {\n\tif ms.find(pb) != nil {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (ms *MessageSet) Unmarshal(pb Message) error {\n\tif item := ms.find(pb); item != nil {\n\t\treturn Unmarshal(item.Message, pb)\n\t}\n\tif _, ok := pb.(messageTypeIder); !ok {\n\t\treturn ErrNoMessageTypeId\n\t}\n\treturn nil // TODO: return error instead?\n}\n\nfunc (ms *MessageSet) Marshal(pb Message) error {\n\tmsg, err := Marshal(pb)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif item := ms.find(pb); item != nil {\n\t\t// reuse existing item\n\t\titem.Message = msg\n\t\treturn nil\n\t}\n\n\tmti, ok := pb.(messageTypeIder)\n\tif !ok {\n\t\treturn 
ErrNoMessageTypeId\n\t}\n\n\tmtid := mti.MessageTypeId()\n\tms.Item = append(ms.Item, &_MessageSet_Item{\n\t\tTypeId:  &mtid,\n\t\tMessage: msg,\n\t})\n\treturn nil\n}\n\nfunc (ms *MessageSet) Reset()         { *ms = MessageSet{} }\nfunc (ms *MessageSet) String() string { return CompactTextString(ms) }\nfunc (*MessageSet) ProtoMessage()     {}\n\n// Support for the message_set_wire_format message option.\n\nfunc skipVarint(buf []byte) []byte {\n\ti := 0\n\tfor ; buf[i]&0x80 != 0; i++ {\n\t}\n\treturn buf[i+1:]\n}\n\n// MarshalMessageSet encodes the extension map represented by m in the message set wire format.\n// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.\nfunc MarshalMessageSet(m map[int32]Extension) ([]byte, error) {\n\tif err := encodeExtensionMap(m); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Sort extension IDs to provide a deterministic encoding.\n\t// See also enc_map in encode.go.\n\tids := make([]int, 0, len(m))\n\tfor id := range m {\n\t\tids = append(ids, int(id))\n\t}\n\tsort.Ints(ids)\n\n\tms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))}\n\tfor _, id := range ids {\n\t\te := m[int32(id)]\n\t\t// Remove the wire type and field number varint, as well as the length varint.\n\t\tmsg := skipVarint(skipVarint(e.enc))\n\n\t\tms.Item = append(ms.Item, &_MessageSet_Item{\n\t\t\tTypeId:  Int32(int32(id)),\n\t\t\tMessage: msg,\n\t\t})\n\t}\n\treturn Marshal(ms)\n}\n\n// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.\n// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.\nfunc UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {\n\tms := new(MessageSet)\n\tif err := Unmarshal(buf, ms); err != nil {\n\t\treturn err\n\t}\n\tfor _, item := range ms.Item {\n\t\tid := *item.TypeId\n\t\tmsg := item.Message\n\n\t\t// Restore wire type and field number varint, plus length 
varint.\n\t\t// Be careful to preserve duplicate items.\n\t\tb := EncodeVarint(uint64(id)<<3 | WireBytes)\n\t\tif ext, ok := m[id]; ok {\n\t\t\t// Existing data; rip off the tag and length varint\n\t\t\t// so we join the new data correctly.\n\t\t\t// We can assume that ext.enc is set because we are unmarshaling.\n\t\t\to := ext.enc[len(b):]   // skip wire type and field number\n\t\t\t_, n := DecodeVarint(o) // calculate length of length varint\n\t\t\to = o[n:]               // skip length varint\n\t\t\tmsg = append(o, msg...) // join old data and new data\n\t\t}\n\t\tb = append(b, EncodeVarint(uint64(len(msg)))...)\n\t\tb = append(b, msg...)\n\n\t\tm[id] = Extension{enc: b}\n\t}\n\treturn nil\n}\n\n// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.\n// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.\nfunc MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {\n\tvar b bytes.Buffer\n\tb.WriteByte('{')\n\n\t// Process the map in key order for deterministic output.\n\tids := make([]int32, 0, len(m))\n\tfor id := range m {\n\t\tids = append(ids, id)\n\t}\n\tsort.Sort(int32Slice(ids)) // int32Slice defined in text.go\n\n\tfor i, id := range ids {\n\t\text := m[id]\n\t\tif i > 0 {\n\t\t\tb.WriteByte(',')\n\t\t}\n\n\t\tmsd, ok := messageSetMap[id]\n\t\tif !ok {\n\t\t\t// Unknown type; we can't render it, so skip it.\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(&b, `\"[%s]\":`, msd.name)\n\n\t\tx := ext.value\n\t\tif x == nil {\n\t\t\tx = reflect.New(msd.t.Elem()).Interface()\n\t\t\tif err := Unmarshal(ext.enc, x.(Message)); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\td, err := json.Marshal(x)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tb.Write(d)\n\t}\n\tb.WriteByte('}')\n\treturn b.Bytes(), nil\n}\n\n// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.\n// It is called by generated UnmarshalJSON methods on 
protocol buffer messages with the message_set_wire_format option.\nfunc UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {\n\t// Common-case fast path.\n\tif len(buf) == 0 || bytes.Equal(buf, []byte(\"{}\")) {\n\t\treturn nil\n\t}\n\n\t// This is fairly tricky, and it's not clear that it is needed.\n\treturn errors.New(\"TODO: UnmarshalMessageSetJSON not yet implemented\")\n}\n\n// A global registry of types that can be used in a MessageSet.\n\nvar messageSetMap = make(map[int32]messageSetDesc)\n\ntype messageSetDesc struct {\n\tt    reflect.Type // pointer to struct\n\tname string\n}\n\n// RegisterMessageSetType is called from the generated code.\nfunc RegisterMessageSetType(m Message, fieldNum int32, name string) {\n\tmessageSetMap[fieldNum] = messageSetDesc{\n\t\tt:    reflect.TypeOf(m),\n\t\tname: name,\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/message_set_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2014 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n)\n\nfunc TestUnmarshalMessageSetWithDuplicate(t *testing.T) {\n\t// Check that a repeated message set entry will be concatenated.\n\tin := &MessageSet{\n\t\tItem: []*_MessageSet_Item{\n\t\t\t{TypeId: Int32(12345), Message: []byte(\"hoo\")},\n\t\t\t{TypeId: Int32(12345), Message: []byte(\"hah\")},\n\t\t},\n\t}\n\tb, err := Marshal(in)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\tt.Logf(\"Marshaled bytes: %q\", b)\n\n\tm := make(map[int32]Extension)\n\tif err := UnmarshalMessageSet(b, m); err != nil {\n\t\tt.Fatalf(\"UnmarshalMessageSet: %v\", err)\n\t}\n\text, ok := m[12345]\n\tif !ok {\n\t\tt.Fatalf(\"Didn't retrieve extension 12345; map is %v\", m)\n\t}\n\t// Skip wire type/field number and length varints.\n\tgot := skipVarint(skipVarint(ext.enc))\n\tif want := []byte(\"hoohah\"); !bytes.Equal(got, want) {\n\t\tt.Errorf(\"Combined extension is %q, want %q\", got, want)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/pointer_reflect.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2012 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// +build appengine\n\n// This file contains an implementation of proto field accesses using package reflect.\n// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can\n// be used on App Engine.\n\npackage proto\n\nimport (\n\t\"math\"\n\t\"reflect\"\n)\n\n// A structPointer is a pointer to a struct.\ntype structPointer struct {\n\tv reflect.Value\n}\n\n// toStructPointer returns a structPointer equivalent to the given reflect value.\n// The reflect value must itself be a pointer to a struct.\nfunc toStructPointer(v reflect.Value) structPointer {\n\treturn structPointer{v}\n}\n\n// IsNil reports whether p is nil.\nfunc structPointer_IsNil(p structPointer) bool {\n\treturn p.v.IsNil()\n}\n\n// Interface returns the struct pointer as an interface value.\nfunc structPointer_Interface(p structPointer, _ reflect.Type) interface{} {\n\treturn p.v.Interface()\n}\n\n// A field identifies a field in a struct, accessible from a structPointer.\n// In this implementation, a field is identified by the sequence of field indices\n// passed to reflect's FieldByIndex.\ntype field []int\n\n// toField returns a field equivalent to the given reflect field.\nfunc toField(f *reflect.StructField) field {\n\treturn f.Index\n}\n\n// invalidField is an invalid field identifier.\nvar invalidField = field(nil)\n\n// IsValid reports whether the field identifier is valid.\nfunc (f field) IsValid() bool { return f != nil }\n\n// 
field returns the given field in the struct as a reflect value.\nfunc structPointer_field(p structPointer, f field) reflect.Value {\n\t// Special case: an extension map entry with a value of type T\n\t// passes a *T to the struct-handling code with a zero field,\n\t// expecting that it will be treated as equivalent to *struct{ X T },\n\t// which has the same memory layout. We have to handle that case\n\t// specially, because reflect will panic if we call FieldByIndex on a\n\t// non-struct.\n\tif f == nil {\n\t\treturn p.v.Elem()\n\t}\n\n\treturn p.v.Elem().FieldByIndex(f)\n}\n\n// ifield returns the given field in the struct as an interface value.\nfunc structPointer_ifield(p structPointer, f field) interface{} {\n\treturn structPointer_field(p, f).Addr().Interface()\n}\n\n// Bytes returns the address of a []byte field in the struct.\nfunc structPointer_Bytes(p structPointer, f field) *[]byte {\n\treturn structPointer_ifield(p, f).(*[]byte)\n}\n\n// BytesSlice returns the address of a [][]byte field in the struct.\nfunc structPointer_BytesSlice(p structPointer, f field) *[][]byte {\n\treturn structPointer_ifield(p, f).(*[][]byte)\n}\n\n// Bool returns the address of a *bool field in the struct.\nfunc structPointer_Bool(p structPointer, f field) **bool {\n\treturn structPointer_ifield(p, f).(**bool)\n}\n\n// BoolVal returns the address of a bool field in the struct.\nfunc structPointer_BoolVal(p structPointer, f field) *bool {\n\treturn structPointer_ifield(p, f).(*bool)\n}\n\n// BoolSlice returns the address of a []bool field in the struct.\nfunc structPointer_BoolSlice(p structPointer, f field) *[]bool {\n\treturn structPointer_ifield(p, f).(*[]bool)\n}\n\n// String returns the address of a *string field in the struct.\nfunc structPointer_String(p structPointer, f field) **string {\n\treturn structPointer_ifield(p, f).(**string)\n}\n\n// StringVal returns the address of a string field in the struct.\nfunc structPointer_StringVal(p structPointer, f field) *string 
{\n\treturn structPointer_ifield(p, f).(*string)\n}\n\n// StringSlice returns the address of a []string field in the struct.\nfunc structPointer_StringSlice(p structPointer, f field) *[]string {\n\treturn structPointer_ifield(p, f).(*[]string)\n}\n\n// ExtMap returns the address of an extension map field in the struct.\nfunc structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {\n\treturn structPointer_ifield(p, f).(*map[int32]Extension)\n}\n\n// Map returns the reflect.Value for the address of a map field in the struct.\nfunc structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {\n\treturn structPointer_field(p, f).Addr()\n}\n\n// SetStructPointer writes a *struct field in the struct.\nfunc structPointer_SetStructPointer(p structPointer, f field, q structPointer) {\n\tstructPointer_field(p, f).Set(q.v)\n}\n\n// GetStructPointer reads a *struct field in the struct.\nfunc structPointer_GetStructPointer(p structPointer, f field) structPointer {\n\treturn structPointer{structPointer_field(p, f)}\n}\n\n// StructPointerSlice the address of a []*struct field in the struct.\nfunc structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {\n\treturn structPointerSlice{structPointer_field(p, f)}\n}\n\n// A structPointerSlice represents the address of a slice of pointers to structs\n// (themselves messages or groups). 
That is, v.Type() is *[]*struct{...}.\ntype structPointerSlice struct {\n\tv reflect.Value\n}\n\nfunc (p structPointerSlice) Len() int                  { return p.v.Len() }\nfunc (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }\nfunc (p structPointerSlice) Append(q structPointer) {\n\tp.v.Set(reflect.Append(p.v, q.v))\n}\n\nvar (\n\tint32Type   = reflect.TypeOf(int32(0))\n\tuint32Type  = reflect.TypeOf(uint32(0))\n\tfloat32Type = reflect.TypeOf(float32(0))\n\tint64Type   = reflect.TypeOf(int64(0))\n\tuint64Type  = reflect.TypeOf(uint64(0))\n\tfloat64Type = reflect.TypeOf(float64(0))\n)\n\n// A word32 represents a field of type *int32, *uint32, *float32, or *enum.\n// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.\ntype word32 struct {\n\tv reflect.Value\n}\n\n// IsNil reports whether p is nil.\nfunc word32_IsNil(p word32) bool {\n\treturn p.v.IsNil()\n}\n\n// Set sets p to point at a newly allocated word with bits set to x.\nfunc word32_Set(p word32, o *Buffer, x uint32) {\n\tt := p.v.Type().Elem()\n\tswitch t {\n\tcase int32Type:\n\t\tif len(o.int32s) == 0 {\n\t\t\to.int32s = make([]int32, uint32PoolSize)\n\t\t}\n\t\to.int32s[0] = int32(x)\n\t\tp.v.Set(reflect.ValueOf(&o.int32s[0]))\n\t\to.int32s = o.int32s[1:]\n\t\treturn\n\tcase uint32Type:\n\t\tif len(o.uint32s) == 0 {\n\t\t\to.uint32s = make([]uint32, uint32PoolSize)\n\t\t}\n\t\to.uint32s[0] = x\n\t\tp.v.Set(reflect.ValueOf(&o.uint32s[0]))\n\t\to.uint32s = o.uint32s[1:]\n\t\treturn\n\tcase float32Type:\n\t\tif len(o.float32s) == 0 {\n\t\t\to.float32s = make([]float32, uint32PoolSize)\n\t\t}\n\t\to.float32s[0] = math.Float32frombits(x)\n\t\tp.v.Set(reflect.ValueOf(&o.float32s[0]))\n\t\to.float32s = o.float32s[1:]\n\t\treturn\n\t}\n\n\t// must be enum\n\tp.v.Set(reflect.New(t))\n\tp.v.Elem().SetInt(int64(int32(x)))\n}\n\n// Get gets the bits pointed at by p, as a uint32.\nfunc word32_Get(p word32) uint32 {\n\telem := p.v.Elem()\n\tswitch 
elem.Kind() {\n\tcase reflect.Int32:\n\t\treturn uint32(elem.Int())\n\tcase reflect.Uint32:\n\t\treturn uint32(elem.Uint())\n\tcase reflect.Float32:\n\t\treturn math.Float32bits(float32(elem.Float()))\n\t}\n\tpanic(\"unreachable\")\n}\n\n// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.\nfunc structPointer_Word32(p structPointer, f field) word32 {\n\treturn word32{structPointer_field(p, f)}\n}\n\n// A word32Val represents a field of type int32, uint32, float32, or enum.\n// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.\ntype word32Val struct {\n\tv reflect.Value\n}\n\n// Set sets *p to x.\nfunc word32Val_Set(p word32Val, x uint32) {\n\tswitch p.v.Type() {\n\tcase int32Type:\n\t\tp.v.SetInt(int64(x))\n\t\treturn\n\tcase uint32Type:\n\t\tp.v.SetUint(uint64(x))\n\t\treturn\n\tcase float32Type:\n\t\tp.v.SetFloat(float64(math.Float32frombits(x)))\n\t\treturn\n\t}\n\n\t// must be enum\n\tp.v.SetInt(int64(int32(x)))\n}\n\n// Get gets the bits pointed at by p, as a uint32.\nfunc word32Val_Get(p word32Val) uint32 {\n\telem := p.v\n\tswitch elem.Kind() {\n\tcase reflect.Int32:\n\t\treturn uint32(elem.Int())\n\tcase reflect.Uint32:\n\t\treturn uint32(elem.Uint())\n\tcase reflect.Float32:\n\t\treturn math.Float32bits(float32(elem.Float()))\n\t}\n\tpanic(\"unreachable\")\n}\n\n// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.\nfunc structPointer_Word32Val(p structPointer, f field) word32Val {\n\treturn word32Val{structPointer_field(p, f)}\n}\n\n// A word32Slice is a slice of 32-bit values.\n// That is, v.Type() is []int32, []uint32, []float32, or []enum.\ntype word32Slice struct {\n\tv reflect.Value\n}\n\nfunc (p word32Slice) Append(x uint32) {\n\tn, m := p.v.Len(), p.v.Cap()\n\tif n < m {\n\t\tp.v.SetLen(n + 1)\n\t} else {\n\t\tt := p.v.Type().Elem()\n\t\tp.v.Set(reflect.Append(p.v, reflect.Zero(t)))\n\t}\n\telem := p.v.Index(n)\n\tswitch elem.Kind() {\n\tcase 
reflect.Int32:\n\t\telem.SetInt(int64(int32(x)))\n\tcase reflect.Uint32:\n\t\telem.SetUint(uint64(x))\n\tcase reflect.Float32:\n\t\telem.SetFloat(float64(math.Float32frombits(x)))\n\t}\n}\n\nfunc (p word32Slice) Len() int {\n\treturn p.v.Len()\n}\n\nfunc (p word32Slice) Index(i int) uint32 {\n\telem := p.v.Index(i)\n\tswitch elem.Kind() {\n\tcase reflect.Int32:\n\t\treturn uint32(elem.Int())\n\tcase reflect.Uint32:\n\t\treturn uint32(elem.Uint())\n\tcase reflect.Float32:\n\t\treturn math.Float32bits(float32(elem.Float()))\n\t}\n\tpanic(\"unreachable\")\n}\n\n// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.\nfunc structPointer_Word32Slice(p structPointer, f field) word32Slice {\n\treturn word32Slice{structPointer_field(p, f)}\n}\n\n// word64 is like word32 but for 64-bit values.\ntype word64 struct {\n\tv reflect.Value\n}\n\nfunc word64_Set(p word64, o *Buffer, x uint64) {\n\tt := p.v.Type().Elem()\n\tswitch t {\n\tcase int64Type:\n\t\tif len(o.int64s) == 0 {\n\t\t\to.int64s = make([]int64, uint64PoolSize)\n\t\t}\n\t\to.int64s[0] = int64(x)\n\t\tp.v.Set(reflect.ValueOf(&o.int64s[0]))\n\t\to.int64s = o.int64s[1:]\n\t\treturn\n\tcase uint64Type:\n\t\tif len(o.uint64s) == 0 {\n\t\t\to.uint64s = make([]uint64, uint64PoolSize)\n\t\t}\n\t\to.uint64s[0] = x\n\t\tp.v.Set(reflect.ValueOf(&o.uint64s[0]))\n\t\to.uint64s = o.uint64s[1:]\n\t\treturn\n\tcase float64Type:\n\t\tif len(o.float64s) == 0 {\n\t\t\to.float64s = make([]float64, uint64PoolSize)\n\t\t}\n\t\to.float64s[0] = math.Float64frombits(x)\n\t\tp.v.Set(reflect.ValueOf(&o.float64s[0]))\n\t\to.float64s = o.float64s[1:]\n\t\treturn\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc word64_IsNil(p word64) bool {\n\treturn p.v.IsNil()\n}\n\nfunc word64_Get(p word64) uint64 {\n\telem := p.v.Elem()\n\tswitch elem.Kind() {\n\tcase reflect.Int64:\n\t\treturn uint64(elem.Int())\n\tcase reflect.Uint64:\n\t\treturn elem.Uint()\n\tcase reflect.Float64:\n\t\treturn 
math.Float64bits(elem.Float())\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc structPointer_Word64(p structPointer, f field) word64 {\n\treturn word64{structPointer_field(p, f)}\n}\n\n// word64Val is like word32Val but for 64-bit values.\ntype word64Val struct {\n\tv reflect.Value\n}\n\nfunc word64Val_Set(p word64Val, o *Buffer, x uint64) {\n\tswitch p.v.Type() {\n\tcase int64Type:\n\t\tp.v.SetInt(int64(x))\n\t\treturn\n\tcase uint64Type:\n\t\tp.v.SetUint(x)\n\t\treturn\n\tcase float64Type:\n\t\tp.v.SetFloat(math.Float64frombits(x))\n\t\treturn\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc word64Val_Get(p word64Val) uint64 {\n\telem := p.v\n\tswitch elem.Kind() {\n\tcase reflect.Int64:\n\t\treturn uint64(elem.Int())\n\tcase reflect.Uint64:\n\t\treturn elem.Uint()\n\tcase reflect.Float64:\n\t\treturn math.Float64bits(elem.Float())\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc structPointer_Word64Val(p structPointer, f field) word64Val {\n\treturn word64Val{structPointer_field(p, f)}\n}\n\ntype word64Slice struct {\n\tv reflect.Value\n}\n\nfunc (p word64Slice) Append(x uint64) {\n\tn, m := p.v.Len(), p.v.Cap()\n\tif n < m {\n\t\tp.v.SetLen(n + 1)\n\t} else {\n\t\tt := p.v.Type().Elem()\n\t\tp.v.Set(reflect.Append(p.v, reflect.Zero(t)))\n\t}\n\telem := p.v.Index(n)\n\tswitch elem.Kind() {\n\tcase reflect.Int64:\n\t\telem.SetInt(int64(int64(x)))\n\tcase reflect.Uint64:\n\t\telem.SetUint(uint64(x))\n\tcase reflect.Float64:\n\t\telem.SetFloat(float64(math.Float64frombits(x)))\n\t}\n}\n\nfunc (p word64Slice) Len() int {\n\treturn p.v.Len()\n}\n\nfunc (p word64Slice) Index(i int) uint64 {\n\telem := p.v.Index(i)\n\tswitch elem.Kind() {\n\tcase reflect.Int64:\n\t\treturn uint64(elem.Int())\n\tcase reflect.Uint64:\n\t\treturn uint64(elem.Uint())\n\tcase reflect.Float64:\n\t\treturn math.Float64bits(float64(elem.Float()))\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc structPointer_Word64Slice(p structPointer, f field) word64Slice {\n\treturn word64Slice{structPointer_field(p, f)}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/pointer_unsafe.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2012 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// +build !appengine\n\n// This file contains the implementation of the proto field accesses using package unsafe.\n\npackage proto\n\nimport (\n\t\"reflect\"\n\t\"unsafe\"\n)\n\n// NOTE: These type_Foo functions would more idiomatically be methods,\n// but Go does not allow methods on pointer types, and we must preserve\n// some pointer type for the garbage collector. We use these\n// funcs with clunky names as our poor approximation to methods.\n//\n// An alternative would be\n//\ttype structPointer struct { p unsafe.Pointer }\n// but that does not registerize as well.\n\n// A structPointer is a pointer to a struct.\ntype structPointer unsafe.Pointer\n\n// toStructPointer returns a structPointer equivalent to the given reflect value.\nfunc toStructPointer(v reflect.Value) structPointer {\n\treturn structPointer(unsafe.Pointer(v.Pointer()))\n}\n\n// IsNil reports whether p is nil.\nfunc structPointer_IsNil(p structPointer) bool {\n\treturn p == nil\n}\n\n// Interface returns the struct pointer, assumed to have element type t,\n// as an interface value.\nfunc structPointer_Interface(p structPointer, t reflect.Type) interface{} {\n\treturn reflect.NewAt(t, unsafe.Pointer(p)).Interface()\n}\n\n// A field identifies a field in a struct, accessible from a structPointer.\n// In this implementation, a field is identified by its byte offset from the start of the struct.\ntype field uintptr\n\n// toField returns a field equivalent to the 
given reflect field.\nfunc toField(f *reflect.StructField) field {\n\treturn field(f.Offset)\n}\n\n// invalidField is an invalid field identifier.\nconst invalidField = ^field(0)\n\n// IsValid reports whether the field identifier is valid.\nfunc (f field) IsValid() bool {\n\treturn f != ^field(0)\n}\n\n// Bytes returns the address of a []byte field in the struct.\nfunc structPointer_Bytes(p structPointer, f field) *[]byte {\n\treturn (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// BytesSlice returns the address of a [][]byte field in the struct.\nfunc structPointer_BytesSlice(p structPointer, f field) *[][]byte {\n\treturn (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// Bool returns the address of a *bool field in the struct.\nfunc structPointer_Bool(p structPointer, f field) **bool {\n\treturn (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// BoolVal returns the address of a bool field in the struct.\nfunc structPointer_BoolVal(p structPointer, f field) *bool {\n\treturn (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// BoolSlice returns the address of a []bool field in the struct.\nfunc structPointer_BoolSlice(p structPointer, f field) *[]bool {\n\treturn (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// String returns the address of a *string field in the struct.\nfunc structPointer_String(p structPointer, f field) **string {\n\treturn (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// StringVal returns the address of a string field in the struct.\nfunc structPointer_StringVal(p structPointer, f field) *string {\n\treturn (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// StringSlice returns the address of a []string field in the struct.\nfunc structPointer_StringSlice(p structPointer, f field) *[]string {\n\treturn (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// ExtMap returns the address of an extension map field in the struct.\nfunc structPointer_ExtMap(p structPointer, f 
field) *map[int32]Extension {\n\treturn (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// Map returns the reflect.Value for the address of a map field in the struct.\nfunc structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {\n\treturn reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))\n}\n\n// SetStructPointer writes a *struct field in the struct.\nfunc structPointer_SetStructPointer(p structPointer, f field, q structPointer) {\n\t*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q\n}\n\n// GetStructPointer reads a *struct field in the struct.\nfunc structPointer_GetStructPointer(p structPointer, f field) structPointer {\n\treturn *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// StructPointerSlice the address of a []*struct field in the struct.\nfunc structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {\n\treturn (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).\ntype structPointerSlice []structPointer\n\nfunc (v *structPointerSlice) Len() int                  { return len(*v) }\nfunc (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }\nfunc (v *structPointerSlice) Append(p structPointer)    { *v = append(*v, p) }\n\n// A word32 is the address of a \"pointer to 32-bit value\" field.\ntype word32 **uint32\n\n// IsNil reports whether *v is nil.\nfunc word32_IsNil(p word32) bool {\n\treturn *p == nil\n}\n\n// Set sets *v to point at a newly allocated word set to x.\nfunc word32_Set(p word32, o *Buffer, x uint32) {\n\tif len(o.uint32s) == 0 {\n\t\to.uint32s = make([]uint32, uint32PoolSize)\n\t}\n\to.uint32s[0] = x\n\t*p = &o.uint32s[0]\n\to.uint32s = o.uint32s[1:]\n}\n\n// Get gets the value pointed at by *v.\nfunc word32_Get(p word32) uint32 {\n\treturn **p\n}\n\n// Word32 returns the address of a *int32, *uint32, 
*float32, or *enum field in the struct.\nfunc structPointer_Word32(p structPointer, f field) word32 {\n\treturn word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))\n}\n\n// A word32Val is the address of a 32-bit value field.\ntype word32Val *uint32\n\n// Set sets *p to x.\nfunc word32Val_Set(p word32Val, x uint32) {\n\t*p = x\n}\n\n// Get gets the value pointed at by p.\nfunc word32Val_Get(p word32Val) uint32 {\n\treturn *p\n}\n\n// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.\nfunc structPointer_Word32Val(p structPointer, f field) word32Val {\n\treturn word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))\n}\n\n// A word32Slice is a slice of 32-bit values.\ntype word32Slice []uint32\n\nfunc (v *word32Slice) Append(x uint32)    { *v = append(*v, x) }\nfunc (v *word32Slice) Len() int           { return len(*v) }\nfunc (v *word32Slice) Index(i int) uint32 { return (*v)[i] }\n\n// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.\nfunc structPointer_Word32Slice(p structPointer, f field) *word32Slice {\n\treturn (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// word64 is like word32 but for 64-bit values.\ntype word64 **uint64\n\nfunc word64_Set(p word64, o *Buffer, x uint64) {\n\tif len(o.uint64s) == 0 {\n\t\to.uint64s = make([]uint64, uint64PoolSize)\n\t}\n\to.uint64s[0] = x\n\t*p = &o.uint64s[0]\n\to.uint64s = o.uint64s[1:]\n}\n\nfunc word64_IsNil(p word64) bool {\n\treturn *p == nil\n}\n\nfunc word64_Get(p word64) uint64 {\n\treturn **p\n}\n\nfunc structPointer_Word64(p structPointer, f field) word64 {\n\treturn word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))\n}\n\n// word64Val is like word32Val but for 64-bit values.\ntype word64Val *uint64\n\nfunc word64Val_Set(p word64Val, o *Buffer, x uint64) {\n\t*p = x\n}\n\nfunc word64Val_Get(p word64Val) uint64 {\n\treturn *p\n}\n\nfunc structPointer_Word64Val(p structPointer, f field) 
word64Val {\n\treturn word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))\n}\n\n// word64Slice is like word32Slice but for 64-bit values.\ntype word64Slice []uint64\n\nfunc (v *word64Slice) Append(x uint64)    { *v = append(*v, x) }\nfunc (v *word64Slice) Len() int           { return len(*v) }\nfunc (v *word64Slice) Index(i int) uint64 { return (*v)[i] }\n\nfunc structPointer_Word64Slice(p structPointer, f field) *word64Slice {\n\treturn (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/properties.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n/*\n * Routines for encoding data into the wire format for protocol buffers.\n */\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\nconst debug bool = false\n\n// Constants that identify the encoding of a value on the wire.\nconst (\n\tWireVarint     = 0\n\tWireFixed64    = 1\n\tWireBytes      = 2\n\tWireStartGroup = 3\n\tWireEndGroup   = 4\n\tWireFixed32    = 5\n)\n\nconst startSize = 10 // initial slice/string sizes\n\n// Encoders are defined in encode.go\n// An encoder outputs the full representation of a field, including its\n// tag and encoder type.\ntype encoder func(p *Buffer, prop *Properties, base structPointer) error\n\n// A valueEncoder encodes a single integer in a particular encoding.\ntype valueEncoder func(o *Buffer, x uint64) error\n\n// Sizers are defined in encode.go\n// A sizer returns the encoded size of a field, including its tag and encoder\n// type.\ntype sizer func(prop *Properties, base structPointer) int\n\n// A valueSizer returns the encoded size of a single integer in a particular\n// encoding.\ntype valueSizer func(x uint64) int\n\n// Decoders are defined in decode.go\n// A decoder creates a value from its wire representation.\n// Unrecognized subelements are saved in unrec.\ntype decoder func(p *Buffer, prop *Properties, base structPointer) error\n\n// A valueDecoder decodes a single integer in a particular encoding.\ntype 
valueDecoder func(o *Buffer) (x uint64, err error)\n\n// tagMap is an optimization over map[int]int for typical protocol buffer\n// use-cases. Encoded protocol buffers are often in tag order with small tag\n// numbers.\ntype tagMap struct {\n\tfastTags []int\n\tslowTags map[int]int\n}\n\n// tagMapFastLimit is the upper bound on the tag number that will be stored in\n// the tagMap slice rather than its map.\nconst tagMapFastLimit = 1024\n\nfunc (p *tagMap) get(t int) (int, bool) {\n\tif t > 0 && t < tagMapFastLimit {\n\t\tif t >= len(p.fastTags) {\n\t\t\treturn 0, false\n\t\t}\n\t\tfi := p.fastTags[t]\n\t\treturn fi, fi >= 0\n\t}\n\tfi, ok := p.slowTags[t]\n\treturn fi, ok\n}\n\nfunc (p *tagMap) put(t int, fi int) {\n\tif t > 0 && t < tagMapFastLimit {\n\t\tfor len(p.fastTags) < t+1 {\n\t\t\tp.fastTags = append(p.fastTags, -1)\n\t\t}\n\t\tp.fastTags[t] = fi\n\t\treturn\n\t}\n\tif p.slowTags == nil {\n\t\tp.slowTags = make(map[int]int)\n\t}\n\tp.slowTags[t] = fi\n}\n\n// StructProperties represents properties for all the fields of a struct.\n// decoderTags and decoderOrigNames should only be used by the decoder.\ntype StructProperties struct {\n\tProp             []*Properties  // properties for each field\n\treqCount         int            // required count\n\tdecoderTags      tagMap         // map from proto tag to struct field number\n\tdecoderOrigNames map[string]int // map from original name to struct field number\n\torder            []int          // list of struct field numbers in tag order\n\tunrecField       field          // field id of the XXX_unrecognized []byte field\n\textendable       bool           // is this an extendable proto\n}\n\n// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.\n// See encode.go, (*Buffer).enc_struct.\n\nfunc (sp *StructProperties) Len() int { return len(sp.order) }\nfunc (sp *StructProperties) Less(i, j int) bool {\n\treturn sp.Prop[sp.order[i]].Tag < 
sp.Prop[sp.order[j]].Tag\n}\nfunc (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }\n\n// Properties represents the protocol-specific behavior of a single struct field.\ntype Properties struct {\n\tName     string // name of the field, for error messages\n\tOrigName string // original name before protocol compiler (always set)\n\tWire     string\n\tWireType int\n\tTag      int\n\tRequired bool\n\tOptional bool\n\tRepeated bool\n\tPacked   bool   // relevant for repeated primitives only\n\tEnum     string // set for enum types only\n\tproto3   bool   // whether this is known to be a proto3 field; set for []byte only\n\n\tDefault    string // default value\n\tHasDefault bool   // whether an explicit default was provided\n\tdef_uint64 uint64\n\n\tenc           encoder\n\tvalEnc        valueEncoder // set for bool and numeric types only\n\tfield         field\n\ttagcode       []byte // encoding of EncodeVarint((Tag<<3)|WireType)\n\ttagbuf        [8]byte\n\tstype         reflect.Type      // set for struct types only\n\tsprop         *StructProperties // set for struct types only\n\tisMarshaler   bool\n\tisUnmarshaler bool\n\n\tmtype    reflect.Type // set for map types only\n\tmkeyprop *Properties  // set for map types only\n\tmvalprop *Properties  // set for map types only\n\n\tsize    sizer\n\tvalSize valueSizer // set for bool and numeric types only\n\n\tdec    decoder\n\tvalDec valueDecoder // set for bool and numeric types only\n\n\t// If this is a packable field, this will be the decoder for the packed version of the field.\n\tpackedDec decoder\n}\n\n// String formats the properties in the protobuf struct field tag style.\nfunc (p *Properties) String() string {\n\ts := p.Wire\n\ts = \",\"\n\ts += strconv.Itoa(p.Tag)\n\tif p.Required {\n\t\ts += \",req\"\n\t}\n\tif p.Optional {\n\t\ts += \",opt\"\n\t}\n\tif p.Repeated {\n\t\ts += \",rep\"\n\t}\n\tif p.Packed {\n\t\ts += \",packed\"\n\t}\n\tif p.OrigName != p.Name 
{\n\t\ts += \",name=\" + p.OrigName\n\t}\n\tif p.proto3 {\n\t\ts += \",proto3\"\n\t}\n\tif len(p.Enum) > 0 {\n\t\ts += \",enum=\" + p.Enum\n\t}\n\tif p.HasDefault {\n\t\ts += \",def=\" + p.Default\n\t}\n\treturn s\n}\n\n// Parse populates p by parsing a string in the protobuf struct field tag style.\nfunc (p *Properties) Parse(s string) {\n\t// \"bytes,49,opt,name=foo,def=hello!\"\n\tfields := strings.Split(s, \",\") // breaks def=, but handled below.\n\tif len(fields) < 2 {\n\t\tfmt.Fprintf(os.Stderr, \"proto: tag has too few fields: %q\\n\", s)\n\t\treturn\n\t}\n\n\tp.Wire = fields[0]\n\tswitch p.Wire {\n\tcase \"varint\":\n\t\tp.WireType = WireVarint\n\t\tp.valEnc = (*Buffer).EncodeVarint\n\t\tp.valDec = (*Buffer).DecodeVarint\n\t\tp.valSize = sizeVarint\n\tcase \"fixed32\":\n\t\tp.WireType = WireFixed32\n\t\tp.valEnc = (*Buffer).EncodeFixed32\n\t\tp.valDec = (*Buffer).DecodeFixed32\n\t\tp.valSize = sizeFixed32\n\tcase \"fixed64\":\n\t\tp.WireType = WireFixed64\n\t\tp.valEnc = (*Buffer).EncodeFixed64\n\t\tp.valDec = (*Buffer).DecodeFixed64\n\t\tp.valSize = sizeFixed64\n\tcase \"zigzag32\":\n\t\tp.WireType = WireVarint\n\t\tp.valEnc = (*Buffer).EncodeZigzag32\n\t\tp.valDec = (*Buffer).DecodeZigzag32\n\t\tp.valSize = sizeZigzag32\n\tcase \"zigzag64\":\n\t\tp.WireType = WireVarint\n\t\tp.valEnc = (*Buffer).EncodeZigzag64\n\t\tp.valDec = (*Buffer).DecodeZigzag64\n\t\tp.valSize = sizeZigzag64\n\tcase \"bytes\", \"group\":\n\t\tp.WireType = WireBytes\n\t\t// no numeric converter for non-numeric types\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"proto: tag has unknown wire type: %q\\n\", s)\n\t\treturn\n\t}\n\n\tvar err error\n\tp.Tag, err = strconv.Atoi(fields[1])\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor i := 2; i < len(fields); i++ {\n\t\tf := fields[i]\n\t\tswitch {\n\t\tcase f == \"req\":\n\t\t\tp.Required = true\n\t\tcase f == \"opt\":\n\t\t\tp.Optional = true\n\t\tcase f == \"rep\":\n\t\t\tp.Repeated = true\n\t\tcase f == \"packed\":\n\t\t\tp.Packed = 
true\n\t\tcase strings.HasPrefix(f, \"name=\"):\n\t\t\tp.OrigName = f[5:]\n\t\tcase strings.HasPrefix(f, \"enum=\"):\n\t\t\tp.Enum = f[5:]\n\t\tcase f == \"proto3\":\n\t\t\tp.proto3 = true\n\t\tcase strings.HasPrefix(f, \"def=\"):\n\t\t\tp.HasDefault = true\n\t\t\tp.Default = f[4:] // rest of string\n\t\t\tif i+1 < len(fields) {\n\t\t\t\t// Commas aren't escaped, and def is always last.\n\t\t\t\tp.Default += \",\" + strings.Join(fields[i+1:], \",\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc logNoSliceEnc(t1, t2 reflect.Type) {\n\tfmt.Fprintf(os.Stderr, \"proto: no slice oenc for %T = []%T\\n\", t1, t2)\n}\n\nvar protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()\n\n// Initialize the fields for encoding and decoding.\nfunc (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {\n\tp.enc = nil\n\tp.dec = nil\n\tp.size = nil\n\n\tswitch t1 := typ; t1.Kind() {\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"proto: no coders for %v\\n\", t1)\n\n\t// proto3 scalar types\n\n\tcase reflect.Bool:\n\t\tp.enc = (*Buffer).enc_proto3_bool\n\t\tp.dec = (*Buffer).dec_proto3_bool\n\t\tp.size = size_proto3_bool\n\tcase reflect.Int32:\n\t\tp.enc = (*Buffer).enc_proto3_int32\n\t\tp.dec = (*Buffer).dec_proto3_int32\n\t\tp.size = size_proto3_int32\n\tcase reflect.Uint32:\n\t\tp.enc = (*Buffer).enc_proto3_uint32\n\t\tp.dec = (*Buffer).dec_proto3_int32 // can reuse\n\t\tp.size = size_proto3_uint32\n\tcase reflect.Int64, reflect.Uint64:\n\t\tp.enc = (*Buffer).enc_proto3_int64\n\t\tp.dec = (*Buffer).dec_proto3_int64\n\t\tp.size = size_proto3_int64\n\tcase reflect.Float32:\n\t\tp.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits\n\t\tp.dec = (*Buffer).dec_proto3_int32\n\t\tp.size = size_proto3_uint32\n\tcase reflect.Float64:\n\t\tp.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits\n\t\tp.dec = (*Buffer).dec_proto3_int64\n\t\tp.size = size_proto3_int64\n\tcase reflect.String:\n\t\tp.enc = 
(*Buffer).enc_proto3_string\n\t\tp.dec = (*Buffer).dec_proto3_string\n\t\tp.size = size_proto3_string\n\n\tcase reflect.Ptr:\n\t\tswitch t2 := t1.Elem(); t2.Kind() {\n\t\tdefault:\n\t\t\tfmt.Fprintf(os.Stderr, \"proto: no encoder function for %v -> %v\\n\", t1, t2)\n\t\t\tbreak\n\t\tcase reflect.Bool:\n\t\t\tp.enc = (*Buffer).enc_bool\n\t\t\tp.dec = (*Buffer).dec_bool\n\t\t\tp.size = size_bool\n\t\tcase reflect.Int32:\n\t\t\tp.enc = (*Buffer).enc_int32\n\t\t\tp.dec = (*Buffer).dec_int32\n\t\t\tp.size = size_int32\n\t\tcase reflect.Uint32:\n\t\t\tp.enc = (*Buffer).enc_uint32\n\t\t\tp.dec = (*Buffer).dec_int32 // can reuse\n\t\t\tp.size = size_uint32\n\t\tcase reflect.Int64, reflect.Uint64:\n\t\t\tp.enc = (*Buffer).enc_int64\n\t\t\tp.dec = (*Buffer).dec_int64\n\t\t\tp.size = size_int64\n\t\tcase reflect.Float32:\n\t\t\tp.enc = (*Buffer).enc_uint32 // can just treat them as bits\n\t\t\tp.dec = (*Buffer).dec_int32\n\t\t\tp.size = size_uint32\n\t\tcase reflect.Float64:\n\t\t\tp.enc = (*Buffer).enc_int64 // can just treat them as bits\n\t\t\tp.dec = (*Buffer).dec_int64\n\t\t\tp.size = size_int64\n\t\tcase reflect.String:\n\t\t\tp.enc = (*Buffer).enc_string\n\t\t\tp.dec = (*Buffer).dec_string\n\t\t\tp.size = size_string\n\t\tcase reflect.Struct:\n\t\t\tp.stype = t1.Elem()\n\t\t\tp.isMarshaler = isMarshaler(t1)\n\t\t\tp.isUnmarshaler = isUnmarshaler(t1)\n\t\t\tif p.Wire == \"bytes\" {\n\t\t\t\tp.enc = (*Buffer).enc_struct_message\n\t\t\t\tp.dec = (*Buffer).dec_struct_message\n\t\t\t\tp.size = size_struct_message\n\t\t\t} else {\n\t\t\t\tp.enc = (*Buffer).enc_struct_group\n\t\t\t\tp.dec = (*Buffer).dec_struct_group\n\t\t\t\tp.size = size_struct_group\n\t\t\t}\n\t\t}\n\n\tcase reflect.Slice:\n\t\tswitch t2 := t1.Elem(); t2.Kind() {\n\t\tdefault:\n\t\t\tlogNoSliceEnc(t1, t2)\n\t\t\tbreak\n\t\tcase reflect.Bool:\n\t\t\tif p.Packed {\n\t\t\t\tp.enc = (*Buffer).enc_slice_packed_bool\n\t\t\t\tp.size = size_slice_packed_bool\n\t\t\t} else {\n\t\t\t\tp.enc = 
(*Buffer).enc_slice_bool\n\t\t\t\tp.size = size_slice_bool\n\t\t\t}\n\t\t\tp.dec = (*Buffer).dec_slice_bool\n\t\t\tp.packedDec = (*Buffer).dec_slice_packed_bool\n\t\tcase reflect.Int32:\n\t\t\tif p.Packed {\n\t\t\t\tp.enc = (*Buffer).enc_slice_packed_int32\n\t\t\t\tp.size = size_slice_packed_int32\n\t\t\t} else {\n\t\t\t\tp.enc = (*Buffer).enc_slice_int32\n\t\t\t\tp.size = size_slice_int32\n\t\t\t}\n\t\t\tp.dec = (*Buffer).dec_slice_int32\n\t\t\tp.packedDec = (*Buffer).dec_slice_packed_int32\n\t\tcase reflect.Uint32:\n\t\t\tif p.Packed {\n\t\t\t\tp.enc = (*Buffer).enc_slice_packed_uint32\n\t\t\t\tp.size = size_slice_packed_uint32\n\t\t\t} else {\n\t\t\t\tp.enc = (*Buffer).enc_slice_uint32\n\t\t\t\tp.size = size_slice_uint32\n\t\t\t}\n\t\t\tp.dec = (*Buffer).dec_slice_int32\n\t\t\tp.packedDec = (*Buffer).dec_slice_packed_int32\n\t\tcase reflect.Int64, reflect.Uint64:\n\t\t\tif p.Packed {\n\t\t\t\tp.enc = (*Buffer).enc_slice_packed_int64\n\t\t\t\tp.size = size_slice_packed_int64\n\t\t\t} else {\n\t\t\t\tp.enc = (*Buffer).enc_slice_int64\n\t\t\t\tp.size = size_slice_int64\n\t\t\t}\n\t\t\tp.dec = (*Buffer).dec_slice_int64\n\t\t\tp.packedDec = (*Buffer).dec_slice_packed_int64\n\t\tcase reflect.Uint8:\n\t\t\tp.enc = (*Buffer).enc_slice_byte\n\t\t\tp.dec = (*Buffer).dec_slice_byte\n\t\t\tp.size = size_slice_byte\n\t\t\t// This is a []byte, which is either a bytes field,\n\t\t\t// or the value of a map field. 
In the latter case,\n\t\t\t// we always encode an empty []byte, so we should not\n\t\t\t// use the proto3 enc/size funcs.\n\t\t\t// f == nil iff this is the key/value of a map field.\n\t\t\tif p.proto3 && f != nil {\n\t\t\t\tp.enc = (*Buffer).enc_proto3_slice_byte\n\t\t\t\tp.size = size_proto3_slice_byte\n\t\t\t}\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\tswitch t2.Bits() {\n\t\t\tcase 32:\n\t\t\t\t// can just treat them as bits\n\t\t\t\tif p.Packed {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_packed_uint32\n\t\t\t\t\tp.size = size_slice_packed_uint32\n\t\t\t\t} else {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_uint32\n\t\t\t\t\tp.size = size_slice_uint32\n\t\t\t\t}\n\t\t\t\tp.dec = (*Buffer).dec_slice_int32\n\t\t\t\tp.packedDec = (*Buffer).dec_slice_packed_int32\n\t\t\tcase 64:\n\t\t\t\t// can just treat them as bits\n\t\t\t\tif p.Packed {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_packed_int64\n\t\t\t\t\tp.size = size_slice_packed_int64\n\t\t\t\t} else {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_int64\n\t\t\t\t\tp.size = size_slice_int64\n\t\t\t\t}\n\t\t\t\tp.dec = (*Buffer).dec_slice_int64\n\t\t\t\tp.packedDec = (*Buffer).dec_slice_packed_int64\n\t\t\tdefault:\n\t\t\t\tlogNoSliceEnc(t1, t2)\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase reflect.String:\n\t\t\tp.enc = (*Buffer).enc_slice_string\n\t\t\tp.dec = (*Buffer).dec_slice_string\n\t\t\tp.size = size_slice_string\n\t\tcase reflect.Ptr:\n\t\t\tswitch t3 := t2.Elem(); t3.Kind() {\n\t\t\tdefault:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"proto: no ptr oenc for %T -> %T -> %T\\n\", t1, t2, t3)\n\t\t\t\tbreak\n\t\t\tcase reflect.Struct:\n\t\t\t\tp.stype = t2.Elem()\n\t\t\t\tp.isMarshaler = isMarshaler(t2)\n\t\t\t\tp.isUnmarshaler = isUnmarshaler(t2)\n\t\t\t\tif p.Wire == \"bytes\" {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_struct_message\n\t\t\t\t\tp.dec = (*Buffer).dec_slice_struct_message\n\t\t\t\t\tp.size = size_slice_struct_message\n\t\t\t\t} else {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_struct_group\n\t\t\t\t\tp.dec = 
(*Buffer).dec_slice_struct_group\n\t\t\t\t\tp.size = size_slice_struct_group\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tswitch t2.Elem().Kind() {\n\t\t\tdefault:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"proto: no slice elem oenc for %T -> %T -> %T\\n\", t1, t2, t2.Elem())\n\t\t\t\tbreak\n\t\t\tcase reflect.Uint8:\n\t\t\t\tp.enc = (*Buffer).enc_slice_slice_byte\n\t\t\t\tp.dec = (*Buffer).dec_slice_slice_byte\n\t\t\t\tp.size = size_slice_slice_byte\n\t\t\t}\n\t\t}\n\n\tcase reflect.Map:\n\t\tp.enc = (*Buffer).enc_new_map\n\t\tp.dec = (*Buffer).dec_new_map\n\t\tp.size = size_new_map\n\n\t\tp.mtype = t1\n\t\tp.mkeyprop = &Properties{}\n\t\tp.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), \"Key\", f.Tag.Get(\"protobuf_key\"), nil, lockGetProp)\n\t\tp.mvalprop = &Properties{}\n\t\tvtype := p.mtype.Elem()\n\t\tif vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {\n\t\t\t// The value type is not a message (*T) or bytes ([]byte),\n\t\t\t// so we need encoders for the pointer to this type.\n\t\t\tvtype = reflect.PtrTo(vtype)\n\t\t}\n\t\tp.mvalprop.init(vtype, \"Value\", f.Tag.Get(\"protobuf_val\"), nil, lockGetProp)\n\t}\n\n\t// precalculate tag code\n\twire := p.WireType\n\tif p.Packed {\n\t\twire = WireBytes\n\t}\n\tx := uint32(p.Tag)<<3 | uint32(wire)\n\ti := 0\n\tfor i = 0; x > 127; i++ {\n\t\tp.tagbuf[i] = 0x80 | uint8(x&0x7F)\n\t\tx >>= 7\n\t}\n\tp.tagbuf[i] = uint8(x)\n\tp.tagcode = p.tagbuf[0 : i+1]\n\n\tif p.stype != nil {\n\t\tif lockGetProp {\n\t\t\tp.sprop = GetProperties(p.stype)\n\t\t} else {\n\t\t\tp.sprop = getPropertiesLocked(p.stype)\n\t\t}\n\t}\n}\n\nvar (\n\tmarshalerType   = reflect.TypeOf((*Marshaler)(nil)).Elem()\n\tunmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()\n)\n\n// isMarshaler reports whether type t implements Marshaler.\nfunc isMarshaler(t reflect.Type) bool {\n\t// We're checking for (likely) pointer-receiver methods\n\t// so if t is not a pointer, something is very wrong.\n\t// The calls above only invoke isMarshaler 
on pointer types.\n\tif t.Kind() != reflect.Ptr {\n\t\tpanic(\"proto: misuse of isMarshaler\")\n\t}\n\treturn t.Implements(marshalerType)\n}\n\n// isUnmarshaler reports whether type t implements Unmarshaler.\nfunc isUnmarshaler(t reflect.Type) bool {\n\t// We're checking for (likely) pointer-receiver methods\n\t// so if t is not a pointer, something is very wrong.\n\t// The calls above only invoke isUnmarshaler on pointer types.\n\tif t.Kind() != reflect.Ptr {\n\t\tpanic(\"proto: misuse of isUnmarshaler\")\n\t}\n\treturn t.Implements(unmarshalerType)\n}\n\n// Init populates the properties from a protocol buffer struct tag.\nfunc (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {\n\tp.init(typ, name, tag, f, true)\n}\n\nfunc (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {\n\t// \"bytes,49,opt,def=hello!\"\n\tp.Name = name\n\tp.OrigName = name\n\tif f != nil {\n\t\tp.field = toField(f)\n\t}\n\tif tag == \"\" {\n\t\treturn\n\t}\n\tp.Parse(tag)\n\tp.setEncAndDec(typ, f, lockGetProp)\n}\n\nvar (\n\tpropertiesMu  sync.RWMutex\n\tpropertiesMap = make(map[reflect.Type]*StructProperties)\n)\n\n// GetProperties returns the list of properties for the type represented by t.\n// t must represent a generated struct type of a protocol message.\nfunc GetProperties(t reflect.Type) *StructProperties {\n\tif t.Kind() != reflect.Struct {\n\t\tpanic(\"proto: type must have kind struct\")\n\t}\n\n\t// Most calls to GetProperties in a long-running program will be\n\t// retrieving details for types we have seen before.\n\tpropertiesMu.RLock()\n\tsprop, ok := propertiesMap[t]\n\tpropertiesMu.RUnlock()\n\tif ok {\n\t\tif collectStats {\n\t\t\tstats.Chit++\n\t\t}\n\t\treturn sprop\n\t}\n\n\tpropertiesMu.Lock()\n\tsprop = getPropertiesLocked(t)\n\tpropertiesMu.Unlock()\n\treturn sprop\n}\n\n// getPropertiesLocked requires that propertiesMu is held.\nfunc getPropertiesLocked(t reflect.Type) *StructProperties 
{\n\tif prop, ok := propertiesMap[t]; ok {\n\t\tif collectStats {\n\t\t\tstats.Chit++\n\t\t}\n\t\treturn prop\n\t}\n\tif collectStats {\n\t\tstats.Cmiss++\n\t}\n\n\tprop := new(StructProperties)\n\t// in case of recursive protos, fill this in now.\n\tpropertiesMap[t] = prop\n\n\t// build properties\n\tprop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)\n\tprop.unrecField = invalidField\n\tprop.Prop = make([]*Properties, t.NumField())\n\tprop.order = make([]int, t.NumField())\n\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tp := new(Properties)\n\t\tname := f.Name\n\t\tp.init(f.Type, name, f.Tag.Get(\"protobuf\"), &f, false)\n\n\t\tif f.Name == \"XXX_extensions\" { // special case\n\t\t\tp.enc = (*Buffer).enc_map\n\t\t\tp.dec = nil // not needed\n\t\t\tp.size = size_map\n\t\t}\n\t\tif f.Name == \"XXX_unrecognized\" { // special case\n\t\t\tprop.unrecField = toField(&f)\n\t\t}\n\t\tprop.Prop[i] = p\n\t\tprop.order[i] = i\n\t\tif debug {\n\t\t\tprint(i, \" \", f.Name, \" \", t.String(), \" \")\n\t\t\tif p.Tag > 0 {\n\t\t\t\tprint(p.String())\n\t\t\t}\n\t\t\tprint(\"\\n\")\n\t\t}\n\t\tif p.enc == nil && !strings.HasPrefix(f.Name, \"XXX_\") {\n\t\t\tfmt.Fprintln(os.Stderr, \"proto: no encoder for\", f.Name, f.Type.String(), \"[GetProperties]\")\n\t\t}\n\t}\n\n\t// Re-order prop.order.\n\tsort.Sort(prop)\n\n\t// build required counts\n\t// build tags\n\treqCount := 0\n\tprop.decoderOrigNames = make(map[string]int)\n\tfor i, p := range prop.Prop {\n\t\tif strings.HasPrefix(p.Name, \"XXX_\") {\n\t\t\t// Internal fields should not appear in tags/origNames maps.\n\t\t\t// They are handled specially when encoding and decoding.\n\t\t\tcontinue\n\t\t}\n\t\tif p.Required {\n\t\t\treqCount++\n\t\t}\n\t\tprop.decoderTags.put(p.Tag, i)\n\t\tprop.decoderOrigNames[p.OrigName] = i\n\t}\n\tprop.reqCount = reqCount\n\n\treturn prop\n}\n\n// Return the Properties object for the x[0]'th field of the structure.\nfunc propByIndex(t reflect.Type, x []int) 
*Properties {\n\tif len(x) != 1 {\n\t\tfmt.Fprintf(os.Stderr, \"proto: field index dimension %d (not 1) for type %s\\n\", len(x), t)\n\t\treturn nil\n\t}\n\tprop := GetProperties(t)\n\treturn prop.Prop[x[0]]\n}\n\n// Get the address and type of a pointer to a struct from an interface.\nfunc getbase(pb Message) (t reflect.Type, b structPointer, err error) {\n\tif pb == nil {\n\t\terr = ErrNil\n\t\treturn\n\t}\n\t// get the reflect type of the pointer to the struct.\n\tt = reflect.TypeOf(pb)\n\t// get the address of the struct.\n\tvalue := reflect.ValueOf(pb)\n\tb = toStructPointer(value)\n\treturn\n}\n\n// A global registry of enum types.\n// The generated code will register the generated maps by calling RegisterEnum.\n\nvar enumValueMaps = make(map[string]map[string]int32)\n\n// RegisterEnum is called from the generated code to install the enum descriptor\n// maps into the global table to aid parsing text format protocol buffers.\nfunc RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {\n\tif _, ok := enumValueMaps[typeName]; ok {\n\t\tpanic(\"proto: duplicate enum registered: \" + typeName)\n\t}\n\tenumValueMaps[typeName] = valueMap\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: proto3_proto/proto3.proto\n// DO NOT EDIT!\n\n/*\nPackage proto3_proto is a generated protocol buffer package.\n\nIt is generated from these files:\n\tproto3_proto/proto3.proto\n\nIt has these top-level messages:\n\tMessage\n\tNested\n\tMessageWithMap\n*/\npackage proto3_proto\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport testdata \"github.com/golang/protobuf/proto/testdata\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\n\ntype Message_Humour int32\n\nconst (\n\tMessage_UNKNOWN     Message_Humour = 0\n\tMessage_PUNS        Message_Humour = 1\n\tMessage_SLAPSTICK   Message_Humour = 2\n\tMessage_BILL_BAILEY Message_Humour = 3\n)\n\nvar Message_Humour_name = map[int32]string{\n\t0: \"UNKNOWN\",\n\t1: \"PUNS\",\n\t2: \"SLAPSTICK\",\n\t3: \"BILL_BAILEY\",\n}\nvar Message_Humour_value = map[string]int32{\n\t\"UNKNOWN\":     0,\n\t\"PUNS\":        1,\n\t\"SLAPSTICK\":   2,\n\t\"BILL_BAILEY\": 3,\n}\n\nfunc (x Message_Humour) String() string {\n\treturn proto.EnumName(Message_Humour_name, int32(x))\n}\n\ntype Message struct {\n\tName         string                           `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\tHilarity     Message_Humour                   `protobuf:\"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour\" json:\"hilarity,omitempty\"`\n\tHeightInCm   uint32                           `protobuf:\"varint,3,opt,name=height_in_cm\" json:\"height_in_cm,omitempty\"`\n\tData         []byte                           `protobuf:\"bytes,4,opt,name=data,proto3\" json:\"data,omitempty\"`\n\tResultCount  int64                            `protobuf:\"varint,7,opt,name=result_count\" json:\"result_count,omitempty\"`\n\tTrueScotsman bool                             `protobuf:\"varint,8,opt,name=true_scotsman\" json:\"true_scotsman,omitempty\"`\n\tScore        float32                          
`protobuf:\"fixed32,9,opt,name=score\" json:\"score,omitempty\"`\n\tKey          []uint64                         `protobuf:\"varint,5,rep,name=key\" json:\"key,omitempty\"`\n\tNested       *Nested                          `protobuf:\"bytes,6,opt,name=nested\" json:\"nested,omitempty\"`\n\tTerrain      map[string]*Nested               `protobuf:\"bytes,10,rep,name=terrain\" json:\"terrain,omitempty\" protobuf_key:\"bytes,1,opt,name=key\" protobuf_val:\"bytes,2,opt,name=value\"`\n\tProto2Field  *testdata.SubDefaults            `protobuf:\"bytes,11,opt,name=proto2_field\" json:\"proto2_field,omitempty\"`\n\tProto2Value  map[string]*testdata.SubDefaults `protobuf:\"bytes,13,rep,name=proto2_value\" json:\"proto2_value,omitempty\" protobuf_key:\"bytes,1,opt,name=key\" protobuf_val:\"bytes,2,opt,name=value\"`\n}\n\nfunc (m *Message) Reset()         { *m = Message{} }\nfunc (m *Message) String() string { return proto.CompactTextString(m) }\nfunc (*Message) ProtoMessage()    {}\n\nfunc (m *Message) GetNested() *Nested {\n\tif m != nil {\n\t\treturn m.Nested\n\t}\n\treturn nil\n}\n\nfunc (m *Message) GetTerrain() map[string]*Nested {\n\tif m != nil {\n\t\treturn m.Terrain\n\t}\n\treturn nil\n}\n\nfunc (m *Message) GetProto2Field() *testdata.SubDefaults {\n\tif m != nil {\n\t\treturn m.Proto2Field\n\t}\n\treturn nil\n}\n\nfunc (m *Message) GetProto2Value() map[string]*testdata.SubDefaults {\n\tif m != nil {\n\t\treturn m.Proto2Value\n\t}\n\treturn nil\n}\n\ntype Nested struct {\n\tBunny string `protobuf:\"bytes,1,opt,name=bunny\" json:\"bunny,omitempty\"`\n}\n\nfunc (m *Nested) Reset()         { *m = Nested{} }\nfunc (m *Nested) String() string { return proto.CompactTextString(m) }\nfunc (*Nested) ProtoMessage()    {}\n\ntype MessageWithMap struct {\n\tByteMapping map[bool][]byte `protobuf:\"bytes,1,rep,name=byte_mapping\" json:\"byte_mapping,omitempty\" protobuf_key:\"varint,1,opt,name=key\" protobuf_val:\"bytes,2,opt,name=value,proto3\"`\n}\n\nfunc (m *MessageWithMap) 
Reset()         { *m = MessageWithMap{} }\nfunc (m *MessageWithMap) String() string { return proto.CompactTextString(m) }\nfunc (*MessageWithMap) ProtoMessage()    {}\n\nfunc (m *MessageWithMap) GetByteMapping() map[bool][]byte {\n\tif m != nil {\n\t\treturn m.ByteMapping\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tproto.RegisterEnum(\"proto3_proto.Message_Humour\", Message_Humour_name, Message_Humour_value)\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2014 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\nimport \"testdata/test.proto\";\n\npackage proto3_proto;\n\nmessage Message {\n  enum Humour {\n    UNKNOWN = 0;\n    PUNS = 1;\n    SLAPSTICK = 2;\n    BILL_BAILEY = 3;\n  }\n\n  string name = 1;\n  Humour hilarity = 2;\n  uint32 height_in_cm = 3;\n  bytes data = 4;\n  int64 result_count = 7;\n  bool true_scotsman = 8;\n  float score = 9;\n\n  repeated uint64 key = 5;\n  Nested nested = 6;\n\n  map<string, Nested> terrain = 10;\n  testdata.SubDefaults proto2_field = 11;\n  map<string, testdata.SubDefaults> proto2_value = 13;\n}\n\nmessage Nested {\n  string bunny = 1;\n}\n\nmessage MessageWithMap {\n  map<bool, bytes> byte_mapping = 1;\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/proto3_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2014 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\tpb \"github.com/golang/protobuf/proto/proto3_proto\"\n\ttpb \"github.com/golang/protobuf/proto/testdata\"\n)\n\nfunc TestProto3ZeroValues(t *testing.T) {\n\ttests := []struct {\n\t\tdesc string\n\t\tm    proto.Message\n\t}{\n\t\t{\"zero message\", &pb.Message{}},\n\t\t{\"empty bytes field\", &pb.Message{Data: []byte{}}},\n\t}\n\tfor _, test := range tests {\n\t\tb, err := proto.Marshal(test.m)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: proto.Marshal: %v\", test.desc, err)\n\t\t\tcontinue\n\t\t}\n\t\tif len(b) > 0 {\n\t\t\tt.Errorf(\"%s: Encoding is non-empty: %q\", test.desc, b)\n\t\t}\n\t}\n}\n\nfunc TestRoundTripProto3(t *testing.T) {\n\tm := &pb.Message{\n\t\tName:         \"David\",          // (2 | 1<<3): 0x0a 0x05 \"David\"\n\t\tHilarity:     pb.Message_PUNS,  // (0 | 2<<3): 0x10 0x01\n\t\tHeightInCm:   178,              // (0 | 3<<3): 0x18 0xb2 0x01\n\t\tData:         []byte(\"roboto\"), // (2 | 4<<3): 0x20 0x06 \"roboto\"\n\t\tResultCount:  47,               // (0 | 7<<3): 0x38 0x2f\n\t\tTrueScotsman: true,             // (0 | 8<<3): 0x40 0x01\n\t\tScore:        8.1,              // (5 | 9<<3): 0x4d <8.1>\n\n\t\tKey: []uint64{1, 0xdeadbeef},\n\t\tNested: &pb.Nested{\n\t\t\tBunny: \"Monty\",\n\t\t},\n\t}\n\tt.Logf(\" m: %v\", m)\n\n\tb, err := proto.Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"proto.Marshal: %v\", 
err)\n\t}\n\tt.Logf(\" b: %q\", b)\n\n\tm2 := new(pb.Message)\n\tif err := proto.Unmarshal(b, m2); err != nil {\n\t\tt.Fatalf(\"proto.Unmarshal: %v\", err)\n\t}\n\tt.Logf(\"m2: %v\", m2)\n\n\tif !proto.Equal(m, m2) {\n\t\tt.Errorf(\"proto.Equal returned false:\\n m: %v\\nm2: %v\", m, m2)\n\t}\n}\n\nfunc TestProto3SetDefaults(t *testing.T) {\n\tin := &pb.Message{\n\t\tTerrain: map[string]*pb.Nested{\n\t\t\t\"meadow\": new(pb.Nested),\n\t\t},\n\t\tProto2Field: new(tpb.SubDefaults),\n\t\tProto2Value: map[string]*tpb.SubDefaults{\n\t\t\t\"badlands\": new(tpb.SubDefaults),\n\t\t},\n\t}\n\n\tgot := proto.Clone(in).(*pb.Message)\n\tproto.SetDefaults(got)\n\n\t// There are no defaults in proto3.  Everything should be the zero value, but\n\t// we need to remember to set defaults for nested proto2 messages.\n\twant := &pb.Message{\n\t\tTerrain: map[string]*pb.Nested{\n\t\t\t\"meadow\": new(pb.Nested),\n\t\t},\n\t\tProto2Field: &tpb.SubDefaults{N: proto.Int64(7)},\n\t\tProto2Value: map[string]*tpb.SubDefaults{\n\t\t\t\"badlands\": &tpb.SubDefaults{N: proto.Int64(7)},\n\t\t},\n\t}\n\n\tif !proto.Equal(got, want) {\n\t\tt.Errorf(\"with in = %v\\nproto.SetDefaults(in) =>\\ngot %v\\nwant %v\", in, got, want)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/size2_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2012 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\nimport (\n\t\"testing\"\n)\n\n// This is a separate file and package from size_test.go because that one uses\n// generated messages and thus may not be in package proto without having a circular\n// dependency, whereas this file tests unexported details of size.go.\n\nfunc TestVarintSize(t *testing.T) {\n\t// Check the edge cases carefully.\n\ttestCases := []struct {\n\t\tn    uint64\n\t\tsize int\n\t}{\n\t\t{0, 1},\n\t\t{1, 1},\n\t\t{127, 1},\n\t\t{128, 2},\n\t\t{16383, 2},\n\t\t{16384, 3},\n\t\t{1<<63 - 1, 9},\n\t\t{1 << 63, 10},\n\t}\n\tfor _, tc := range testCases {\n\t\tsize := sizeVarint(tc.n)\n\t\tif size != tc.size {\n\t\t\tt.Errorf(\"sizeVarint(%d) = %d, want %d\", tc.n, size, tc.size)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/size_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2012 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"log\"\n\t\"strings\"\n\t\"testing\"\n\n\t. 
\"github.com/golang/protobuf/proto\"\n\tproto3pb \"github.com/golang/protobuf/proto/proto3_proto\"\n\tpb \"github.com/golang/protobuf/proto/testdata\"\n)\n\nvar messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}\n\n// messageWithExtension2 is in equal_test.go.\nvar messageWithExtension3 = &pb.MyMessage{Count: Int32(8)}\n\nfunc init() {\n\tif err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String(\"Abbott\")}); err != nil {\n\t\tlog.Panicf(\"SetExtension: %v\", err)\n\t}\n\tif err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String(\"Costello\")}); err != nil {\n\t\tlog.Panicf(\"SetExtension: %v\", err)\n\t}\n\n\t// Force messageWithExtension3 to have the extension encoded.\n\tMarshal(messageWithExtension3)\n\n}\n\nvar SizeTests = []struct {\n\tdesc string\n\tpb   Message\n}{\n\t{\"empty\", &pb.OtherMessage{}},\n\t// Basic types.\n\t{\"bool\", &pb.Defaults{F_Bool: Bool(true)}},\n\t{\"int32\", &pb.Defaults{F_Int32: Int32(12)}},\n\t{\"negative int32\", &pb.Defaults{F_Int32: Int32(-1)}},\n\t{\"small int64\", &pb.Defaults{F_Int64: Int64(1)}},\n\t{\"big int64\", &pb.Defaults{F_Int64: Int64(1 << 20)}},\n\t{\"negative int64\", &pb.Defaults{F_Int64: Int64(-1)}},\n\t{\"fixed32\", &pb.Defaults{F_Fixed32: Uint32(71)}},\n\t{\"fixed64\", &pb.Defaults{F_Fixed64: Uint64(72)}},\n\t{\"uint32\", &pb.Defaults{F_Uint32: Uint32(123)}},\n\t{\"uint64\", &pb.Defaults{F_Uint64: Uint64(124)}},\n\t{\"float\", &pb.Defaults{F_Float: Float32(12.6)}},\n\t{\"double\", &pb.Defaults{F_Double: Float64(13.9)}},\n\t{\"string\", &pb.Defaults{F_String: String(\"niles\")}},\n\t{\"bytes\", &pb.Defaults{F_Bytes: []byte(\"wowsa\")}},\n\t{\"bytes, empty\", &pb.Defaults{F_Bytes: []byte{}}},\n\t{\"sint32\", &pb.Defaults{F_Sint32: Int32(65)}},\n\t{\"sint64\", &pb.Defaults{F_Sint64: Int64(67)}},\n\t{\"enum\", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}},\n\t// Repeated.\n\t{\"empty repeated bool\", &pb.MoreRepeated{Bools: []bool{}}},\n\t{\"repeated bool\", 
&pb.MoreRepeated{Bools: []bool{false, true, true, false}}},\n\t{\"packed repeated bool\", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}},\n\t{\"repeated int32\", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}},\n\t{\"repeated int32 packed\", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}},\n\t{\"repeated int64 packed\", &pb.MoreRepeated{Int64SPacked: []int64{\n\t\t// Need enough large numbers to verify that the header is counting the number of bytes\n\t\t// for the field, not the number of elements.\n\t\t1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,\n\t\t1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,\n\t}}},\n\t{\"repeated string\", &pb.MoreRepeated{Strings: []string{\"r\", \"ken\", \"gri\"}}},\n\t{\"repeated fixed\", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}},\n\t// Nested.\n\t{\"nested\", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String(\"whatever\")}}},\n\t{\"group\", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}},\n\t// Other things.\n\t{\"unrecognized\", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}},\n\t{\"extension (unencoded)\", messageWithExtension1},\n\t{\"extension (encoded)\", messageWithExtension3},\n\t// proto3 message\n\t{\"proto3 empty\", &proto3pb.Message{}},\n\t{\"proto3 bool\", &proto3pb.Message{TrueScotsman: true}},\n\t{\"proto3 int64\", &proto3pb.Message{ResultCount: 1}},\n\t{\"proto3 uint32\", &proto3pb.Message{HeightInCm: 123}},\n\t{\"proto3 float\", &proto3pb.Message{Score: 12.6}},\n\t{\"proto3 string\", &proto3pb.Message{Name: \"Snezana\"}},\n\t{\"proto3 bytes\", &proto3pb.Message{Data: []byte(\"wowsa\")}},\n\t{\"proto3 bytes, empty\", &proto3pb.Message{Data: []byte{}}},\n\t{\"proto3 enum\", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},\n\t{\"proto3 map field with empty bytes\", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}},\n\n\t{\"map field\", 
&pb.MessageWithMap{NameMapping: map[int32]string{1: \"Rob\", 7: \"Andrew\"}}},\n\t{\"map field with message\", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}},\n\t{\"map field with bytes\", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte(\"this time for sure\")}}},\n\t{\"map field with empty bytes\", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}},\n\n\t{\"map field with big entry\", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat(\"x\", 125)}}},\n\t{\"map field with big key and val\", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat(\"x\", 70): strings.Repeat(\"y\", 70)}}},\n\t{\"map field with big numeric key\", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: \"om nom nom\"}}},\n}\n\nfunc TestSize(t *testing.T) {\n\tfor _, tc := range SizeTests {\n\t\tsize := Size(tc.pb)\n\t\tb, err := Marshal(tc.pb)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%v: Marshal failed: %v\", tc.desc, err)\n\t\t\tcontinue\n\t\t}\n\t\tif size != len(b) {\n\t\t\tt.Errorf(\"%v: Size(%v) = %d, want %d\", tc.desc, tc.pb, size, len(b))\n\t\t\tt.Logf(\"%v: bytes: %#v\", tc.desc, b)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/text.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n// Functions for writing the text protocol buffer format.\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"os\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strings\"\n)\n\nvar (\n\tnewline         = []byte(\"\\n\")\n\tspaces          = []byte(\"                                        \")\n\tgtNewline       = []byte(\">\\n\")\n\tendBraceNewline = []byte(\"}\\n\")\n\tbackslashN      = []byte{'\\\\', 'n'}\n\tbackslashR      = []byte{'\\\\', 'r'}\n\tbackslashT      = []byte{'\\\\', 't'}\n\tbackslashDQ     = []byte{'\\\\', '\"'}\n\tbackslashBS     = []byte{'\\\\', '\\\\'}\n\tposInf          = []byte(\"inf\")\n\tnegInf          = []byte(\"-inf\")\n\tnan             = []byte(\"nan\")\n)\n\ntype writer interface {\n\tio.Writer\n\tWriteByte(byte) error\n}\n\n// textWriter is an io.Writer that tracks its indentation level.\ntype textWriter struct {\n\tind      int\n\tcomplete bool // if the current position is a complete line\n\tcompact  bool // whether to write out as a one-liner\n\tw        writer\n}\n\nfunc (w *textWriter) WriteString(s string) (n int, err error) {\n\tif !strings.Contains(s, \"\\n\") {\n\t\tif !w.compact && w.complete {\n\t\t\tw.writeIndent()\n\t\t}\n\t\tw.complete = false\n\t\treturn io.WriteString(w.w, s)\n\t}\n\t// WriteString is typically called without newlines, so this\n\t// codepath and its copy are rare.  
We copy to avoid\n\t// duplicating all of Write's logic here.\n\treturn w.Write([]byte(s))\n}\n\nfunc (w *textWriter) Write(p []byte) (n int, err error) {\n\tnewlines := bytes.Count(p, newline)\n\tif newlines == 0 {\n\t\tif !w.compact && w.complete {\n\t\t\tw.writeIndent()\n\t\t}\n\t\tn, err = w.w.Write(p)\n\t\tw.complete = false\n\t\treturn n, err\n\t}\n\n\tfrags := bytes.SplitN(p, newline, newlines+1)\n\tif w.compact {\n\t\tfor i, frag := range frags {\n\t\t\tif i > 0 {\n\t\t\t\tif err := w.w.WriteByte(' '); err != nil {\n\t\t\t\t\treturn n, err\n\t\t\t\t}\n\t\t\t\tn++\n\t\t\t}\n\t\t\tnn, err := w.w.Write(frag)\n\t\t\tn += nn\n\t\t\tif err != nil {\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t}\n\t\treturn n, nil\n\t}\n\n\tfor i, frag := range frags {\n\t\tif w.complete {\n\t\t\tw.writeIndent()\n\t\t}\n\t\tnn, err := w.w.Write(frag)\n\t\tn += nn\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tif i+1 < len(frags) {\n\t\t\tif err := w.w.WriteByte('\\n'); err != nil {\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t\tn++\n\t\t}\n\t}\n\tw.complete = len(frags[len(frags)-1]) == 0\n\treturn n, nil\n}\n\nfunc (w *textWriter) WriteByte(c byte) error {\n\tif w.compact && c == '\\n' {\n\t\tc = ' '\n\t}\n\tif !w.compact && w.complete {\n\t\tw.writeIndent()\n\t}\n\terr := w.w.WriteByte(c)\n\tw.complete = c == '\\n'\n\treturn err\n}\n\nfunc (w *textWriter) indent() { w.ind++ }\n\nfunc (w *textWriter) unindent() {\n\tif w.ind == 0 {\n\t\tlog.Printf(\"proto: textWriter unindented too far\")\n\t\treturn\n\t}\n\tw.ind--\n}\n\nfunc writeName(w *textWriter, props *Properties) error {\n\tif _, err := w.WriteString(props.OrigName); err != nil {\n\t\treturn err\n\t}\n\tif props.Wire != \"group\" {\n\t\treturn w.WriteByte(':')\n\t}\n\treturn nil\n}\n\nvar (\n\tmessageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem()\n)\n\n// raw is the interface satisfied by RawMessage.\ntype raw interface {\n\tBytes() []byte\n}\n\nfunc writeStruct(w *textWriter, sv reflect.Value) error {\n\tif sv.Type() == 
messageSetType {\n\t\treturn writeMessageSet(w, sv.Addr().Interface().(*MessageSet))\n\t}\n\n\tst := sv.Type()\n\tsprops := GetProperties(st)\n\tfor i := 0; i < sv.NumField(); i++ {\n\t\tfv := sv.Field(i)\n\t\tprops := sprops.Prop[i]\n\t\tname := st.Field(i).Name\n\n\t\tif strings.HasPrefix(name, \"XXX_\") {\n\t\t\t// There are two XXX_ fields:\n\t\t\t//   XXX_unrecognized []byte\n\t\t\t//   XXX_extensions   map[int32]proto.Extension\n\t\t\t// The first is handled here;\n\t\t\t// the second is handled at the bottom of this function.\n\t\t\tif name == \"XXX_unrecognized\" && !fv.IsNil() {\n\t\t\t\tif err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif fv.Kind() == reflect.Ptr && fv.IsNil() {\n\t\t\t// Field not filled in. This could be an optional field or\n\t\t\t// a required field that wasn't filled in. Either way, there\n\t\t\t// isn't anything we can show for it.\n\t\t\tcontinue\n\t\t}\n\t\tif fv.Kind() == reflect.Slice && fv.IsNil() {\n\t\t\t// Repeated field that is empty, or a bytes field that is unused.\n\t\t\tcontinue\n\t\t}\n\n\t\tif props.Repeated && fv.Kind() == reflect.Slice {\n\t\t\t// Repeated field.\n\t\t\tfor j := 0; j < fv.Len(); j++ {\n\t\t\t\tif err := writeName(w, props); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !w.compact {\n\t\t\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tv := fv.Index(j)\n\t\t\t\tif v.Kind() == reflect.Ptr && v.IsNil() {\n\t\t\t\t\t// A nil message in a repeated field is not valid,\n\t\t\t\t\t// but we can handle that more gracefully than panicking.\n\t\t\t\t\tif _, err := w.Write([]byte(\"<nil>\\n\")); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif err := writeAny(w, v, props); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\t\treturn 
err\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif fv.Kind() == reflect.Map {\n\t\t\t// Map fields are rendered as a repeated struct with key/value fields.\n\t\t\tkeys := fv.MapKeys() // TODO: should we sort these for deterministic output?\n\t\t\tsort.Sort(mapKeys(keys))\n\t\t\tfor _, key := range keys {\n\t\t\t\tval := fv.MapIndex(key)\n\t\t\t\tif err := writeName(w, props); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !w.compact {\n\t\t\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// open struct\n\t\t\t\tif err := w.WriteByte('<'); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !w.compact {\n\t\t\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tw.indent()\n\t\t\t\t// key\n\t\t\t\tif _, err := w.WriteString(\"key:\"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !w.compact {\n\t\t\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif err := writeAny(w, key, props.mkeyprop); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t// value\n\t\t\t\tif _, err := w.WriteString(\"value:\"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !w.compact {\n\t\t\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif err := writeAny(w, val, props.mvalprop); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t// close struct\n\t\t\t\tw.unindent()\n\t\t\t\tif err := w.WriteByte('>'); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {\n\t\t\t// empty bytes 
field\n\t\t\tcontinue\n\t\t}\n\t\tif fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {\n\t\t\t// proto3 non-repeated scalar field; skip if zero value\n\t\t\tswitch fv.Kind() {\n\t\t\tcase reflect.Bool:\n\t\t\t\tif !fv.Bool() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase reflect.Int32, reflect.Int64:\n\t\t\t\tif fv.Int() == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase reflect.Uint32, reflect.Uint64:\n\t\t\t\tif fv.Uint() == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase reflect.Float32, reflect.Float64:\n\t\t\t\tif fv.Float() == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase reflect.String:\n\t\t\t\tif fv.String() == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif err := writeName(w, props); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !w.compact {\n\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif b, ok := fv.Interface().(raw); ok {\n\t\t\tif err := writeRaw(w, b.Bytes()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// Enums have a String method, so writeAny will work fine.\n\t\tif err := writeAny(w, fv, props); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Extensions (the XXX_extensions field).\n\tpv := sv.Addr()\n\tif pv.Type().Implements(extendableProtoType) {\n\t\tif err := writeExtensions(w, pv); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// writeRaw writes an uninterpreted raw message.\nfunc writeRaw(w *textWriter, b []byte) error {\n\tif err := w.WriteByte('<'); err != nil {\n\t\treturn err\n\t}\n\tif !w.compact {\n\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tw.indent()\n\tif err := writeUnknownStruct(w, b); err != nil {\n\t\treturn err\n\t}\n\tw.unindent()\n\tif err := w.WriteByte('>'); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// writeAny writes an arbitrary field.\nfunc writeAny(w *textWriter, v reflect.Value, props 
*Properties) error {\n\tv = reflect.Indirect(v)\n\n\t// Floats have special cases.\n\tif v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {\n\t\tx := v.Float()\n\t\tvar b []byte\n\t\tswitch {\n\t\tcase math.IsInf(x, 1):\n\t\t\tb = posInf\n\t\tcase math.IsInf(x, -1):\n\t\t\tb = negInf\n\t\tcase math.IsNaN(x):\n\t\t\tb = nan\n\t\t}\n\t\tif b != nil {\n\t\t\t_, err := w.Write(b)\n\t\t\treturn err\n\t\t}\n\t\t// Other values are handled below.\n\t}\n\n\t// We don't attempt to serialise every possible value type; only those\n\t// that can occur in protocol buffers.\n\tswitch v.Kind() {\n\tcase reflect.Slice:\n\t\t// Should only be a []byte; repeated fields are handled in writeStruct.\n\t\tif err := writeString(w, string(v.Interface().([]byte))); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase reflect.String:\n\t\tif err := writeString(w, v.String()); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase reflect.Struct:\n\t\t// Required/optional group/message.\n\t\tvar bra, ket byte = '<', '>'\n\t\tif props != nil && props.Wire == \"group\" {\n\t\t\tbra, ket = '{', '}'\n\t\t}\n\t\tif err := w.WriteByte(bra); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !w.compact {\n\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tw.indent()\n\t\tif tm, ok := v.Interface().(encoding.TextMarshaler); ok {\n\t\t\ttext, err := tm.MarshalText()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err = w.Write(text); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if err := writeStruct(w, v); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tw.unindent()\n\t\tif err := w.WriteByte(ket); err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\t_, err := fmt.Fprint(w, v.Interface())\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// equivalent to C's isprint.\nfunc isprint(c byte) bool {\n\treturn c >= 0x20 && c < 0x7f\n}\n\n// writeString writes a string in the protocol buffer text format.\n// It is similar to strconv.Quote except we don't use Go 
escape sequences,\n// we treat the string as a byte sequence, and we use octal escapes.\n// These differences are to maintain interoperability with the other\n// languages' implementations of the text format.\nfunc writeString(w *textWriter, s string) error {\n\t// use WriteByte here to get any needed indent\n\tif err := w.WriteByte('\"'); err != nil {\n\t\treturn err\n\t}\n\t// Loop over the bytes, not the runes.\n\tfor i := 0; i < len(s); i++ {\n\t\tvar err error\n\t\t// Divergence from C++: we don't escape apostrophes.\n\t\t// There's no need to escape them, and the C++ parser\n\t\t// copes with a naked apostrophe.\n\t\tswitch c := s[i]; c {\n\t\tcase '\\n':\n\t\t\t_, err = w.w.Write(backslashN)\n\t\tcase '\\r':\n\t\t\t_, err = w.w.Write(backslashR)\n\t\tcase '\\t':\n\t\t\t_, err = w.w.Write(backslashT)\n\t\tcase '\"':\n\t\t\t_, err = w.w.Write(backslashDQ)\n\t\tcase '\\\\':\n\t\t\t_, err = w.w.Write(backslashBS)\n\t\tdefault:\n\t\t\tif isprint(c) {\n\t\t\t\terr = w.w.WriteByte(c)\n\t\t\t} else {\n\t\t\t\t_, err = fmt.Fprintf(w.w, \"\\\\%03o\", c)\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn w.WriteByte('\"')\n}\n\nfunc writeMessageSet(w *textWriter, ms *MessageSet) error {\n\tfor _, item := range ms.Item {\n\t\tid := *item.TypeId\n\t\tif msd, ok := messageSetMap[id]; ok {\n\t\t\t// Known message set type.\n\t\t\tif _, err := fmt.Fprintf(w, \"[%s]: <\\n\", msd.name); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tw.indent()\n\n\t\t\tpb := reflect.New(msd.t.Elem())\n\t\t\tif err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil {\n\t\t\t\tif _, err := fmt.Fprintf(w, \"/* bad message: %v */\\n\", err); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err := writeStruct(w, pb.Elem()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// Unknown type.\n\t\t\tif _, err := fmt.Fprintf(w, \"[%d]: <\\n\", id); err != nil {\n\t\t\t\treturn 
err\n\t\t\t}\n\t\t\tw.indent()\n\t\t\tif err := writeUnknownStruct(w, item.Message); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tw.unindent()\n\t\tif _, err := w.Write(gtNewline); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc writeUnknownStruct(w *textWriter, data []byte) (err error) {\n\tif !w.compact {\n\t\tif _, err := fmt.Fprintf(w, \"/* %d unknown bytes */\\n\", len(data)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tb := NewBuffer(data)\n\tfor b.index < len(b.buf) {\n\t\tx, err := b.DecodeVarint()\n\t\tif err != nil {\n\t\t\t_, err := fmt.Fprintf(w, \"/* %v */\\n\", err)\n\t\t\treturn err\n\t\t}\n\t\twire, tag := x&7, x>>3\n\t\tif wire == WireEndGroup {\n\t\t\tw.unindent()\n\t\t\tif _, err := w.Write(endBraceNewline); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif _, err := fmt.Fprint(w, tag); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif wire != WireStartGroup {\n\t\t\tif err := w.WriteByte(':'); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif !w.compact || wire == WireStartGroup {\n\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tswitch wire {\n\t\tcase WireBytes:\n\t\t\tbuf, e := b.DecodeRawBytes(false)\n\t\t\tif e == nil {\n\t\t\t\t_, err = fmt.Fprintf(w, \"%q\", buf)\n\t\t\t} else {\n\t\t\t\t_, err = fmt.Fprintf(w, \"/* %v */\", e)\n\t\t\t}\n\t\tcase WireFixed32:\n\t\t\tx, err = b.DecodeFixed32()\n\t\t\terr = writeUnknownInt(w, x, err)\n\t\tcase WireFixed64:\n\t\t\tx, err = b.DecodeFixed64()\n\t\t\terr = writeUnknownInt(w, x, err)\n\t\tcase WireStartGroup:\n\t\t\terr = w.WriteByte('{')\n\t\t\tw.indent()\n\t\tcase WireVarint:\n\t\t\tx, err = b.DecodeVarint()\n\t\t\terr = writeUnknownInt(w, x, err)\n\t\tdefault:\n\t\t\t_, err = fmt.Fprintf(w, \"/* unknown wire type %d */\", wire)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = w.WriteByte('\\n'); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc writeUnknownInt(w 
*textWriter, x uint64, err error) error {\n\tif err == nil {\n\t\t_, err = fmt.Fprint(w, x)\n\t} else {\n\t\t_, err = fmt.Fprintf(w, \"/* %v */\", err)\n\t}\n\treturn err\n}\n\ntype int32Slice []int32\n\nfunc (s int32Slice) Len() int           { return len(s) }\nfunc (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }\nfunc (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }\n\n// writeExtensions writes all the extensions in pv.\n// pv is assumed to be a pointer to a protocol message struct that is extendable.\nfunc writeExtensions(w *textWriter, pv reflect.Value) error {\n\temap := extensionMaps[pv.Type().Elem()]\n\tep := pv.Interface().(extendableProto)\n\n\t// Order the extensions by ID.\n\t// This isn't strictly necessary, but it will give us\n\t// canonical output, which will also make testing easier.\n\tm := ep.ExtensionMap()\n\tids := make([]int32, 0, len(m))\n\tfor id := range m {\n\t\tids = append(ids, id)\n\t}\n\tsort.Sort(int32Slice(ids))\n\n\tfor _, extNum := range ids {\n\t\text := m[extNum]\n\t\tvar desc *ExtensionDesc\n\t\tif emap != nil {\n\t\t\tdesc = emap[extNum]\n\t\t}\n\t\tif desc == nil {\n\t\t\t// Unknown extension.\n\t\t\tif err := writeUnknownStruct(w, ext.enc); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tpb, err := GetExtension(ep, desc)\n\t\tif err != nil {\n\t\t\tif _, err := fmt.Fprintln(os.Stderr, \"proto: failed getting extension: \", err); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// Repeated extensions will appear as a slice.\n\t\tif !desc.repeated() {\n\t\t\tif err := writeExtension(w, desc.Name, pb); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tv := reflect.ValueOf(pb)\n\t\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\t\tif err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc writeExtension(w *textWriter, name string, pb interface{}) error {\n\tif 
_, err := fmt.Fprintf(w, \"[%s]:\", name); err != nil {\n\t\treturn err\n\t}\n\tif !w.compact {\n\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {\n\t\treturn err\n\t}\n\tif err := w.WriteByte('\\n'); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (w *textWriter) writeIndent() {\n\tif !w.complete {\n\t\treturn\n\t}\n\tremain := w.ind * 2\n\tfor remain > 0 {\n\t\tn := remain\n\t\tif n > len(spaces) {\n\t\t\tn = len(spaces)\n\t\t}\n\t\tw.w.Write(spaces[:n])\n\t\tremain -= n\n\t}\n\tw.complete = false\n}\n\nfunc marshalText(w io.Writer, pb Message, compact bool) error {\n\tval := reflect.ValueOf(pb)\n\tif pb == nil || val.IsNil() {\n\t\tw.Write([]byte(\"<nil>\"))\n\t\treturn nil\n\t}\n\tvar bw *bufio.Writer\n\tww, ok := w.(writer)\n\tif !ok {\n\t\tbw = bufio.NewWriter(w)\n\t\tww = bw\n\t}\n\taw := &textWriter{\n\t\tw:        ww,\n\t\tcomplete: true,\n\t\tcompact:  compact,\n\t}\n\n\tif tm, ok := pb.(encoding.TextMarshaler); ok {\n\t\ttext, err := tm.MarshalText()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err = aw.Write(text); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif bw != nil {\n\t\t\treturn bw.Flush()\n\t\t}\n\t\treturn nil\n\t}\n\t// Dereference the received pointer so we don't have outer < and >.\n\tv := reflect.Indirect(val)\n\tif err := writeStruct(aw, v); err != nil {\n\t\treturn err\n\t}\n\tif bw != nil {\n\t\treturn bw.Flush()\n\t}\n\treturn nil\n}\n\n// MarshalText writes a given protocol buffer in text format.\n// The only errors returned are from w.\nfunc MarshalText(w io.Writer, pb Message) error {\n\treturn marshalText(w, pb, false)\n}\n\n// MarshalTextString is the same as MarshalText, but returns the string directly.\nfunc MarshalTextString(pb Message) string {\n\tvar buf bytes.Buffer\n\tmarshalText(&buf, pb, false)\n\treturn buf.String()\n}\n\n// CompactText writes a given protocol buffer in compact text format (one line).\nfunc 
CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }\n\n// CompactTextString is the same as CompactText, but returns the string directly.\nfunc CompactTextString(pb Message) string {\n\tvar buf bytes.Buffer\n\tmarshalText(&buf, pb, true)\n\treturn buf.String()\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/text_parser.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n// Functions for parsing the Text protocol buffer format.\n// TODO: message sets.\n\nimport (\n\t\"encoding\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode/utf8\"\n)\n\ntype ParseError struct {\n\tMessage string\n\tLine    int // 1-based line number\n\tOffset  int // 0-based byte offset from start of input\n}\n\nfunc (p *ParseError) Error() string {\n\tif p.Line == 1 {\n\t\t// show offset only for first line\n\t\treturn fmt.Sprintf(\"line 1.%d: %v\", p.Offset, p.Message)\n\t}\n\treturn fmt.Sprintf(\"line %d: %v\", p.Line, p.Message)\n}\n\ntype token struct {\n\tvalue    string\n\terr      *ParseError\n\tline     int    // line number\n\toffset   int    // byte number from start of input, not start of line\n\tunquoted string // the unquoted version of value, if it was a quoted string\n}\n\nfunc (t *token) String() string {\n\tif t.err == nil {\n\t\treturn fmt.Sprintf(\"%q (line=%d, offset=%d)\", t.value, t.line, t.offset)\n\t}\n\treturn fmt.Sprintf(\"parse error: %v\", t.err)\n}\n\ntype textParser struct {\n\ts            string // remaining input\n\tdone         bool   // whether the parsing is finished (success or error)\n\tbacked       bool   // whether back() was called\n\toffset, line int\n\tcur          token\n}\n\nfunc newTextParser(s string) *textParser {\n\tp := new(textParser)\n\tp.s = s\n\tp.line = 1\n\tp.cur.line = 1\n\treturn p\n}\n\nfunc (p *textParser) 
errorf(format string, a ...interface{}) *ParseError {\n\tpe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}\n\tp.cur.err = pe\n\tp.done = true\n\treturn pe\n}\n\n// Numbers and identifiers are matched by [-+._A-Za-z0-9]\nfunc isIdentOrNumberChar(c byte) bool {\n\tswitch {\n\tcase 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':\n\t\treturn true\n\tcase '0' <= c && c <= '9':\n\t\treturn true\n\t}\n\tswitch c {\n\tcase '-', '+', '.', '_':\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc isWhitespace(c byte) bool {\n\tswitch c {\n\tcase ' ', '\\t', '\\n', '\\r':\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (p *textParser) skipWhitespace() {\n\ti := 0\n\tfor i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {\n\t\tif p.s[i] == '#' {\n\t\t\t// comment; skip to end of line or input\n\t\t\tfor i < len(p.s) && p.s[i] != '\\n' {\n\t\t\t\ti++\n\t\t\t}\n\t\t\tif i == len(p.s) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif p.s[i] == '\\n' {\n\t\t\tp.line++\n\t\t}\n\t\ti++\n\t}\n\tp.offset += i\n\tp.s = p.s[i:len(p.s)]\n\tif len(p.s) == 0 {\n\t\tp.done = true\n\t}\n}\n\nfunc (p *textParser) advance() {\n\t// Skip whitespace\n\tp.skipWhitespace()\n\tif p.done {\n\t\treturn\n\t}\n\n\t// Start of non-whitespace\n\tp.cur.err = nil\n\tp.cur.offset, p.cur.line = p.offset, p.line\n\tp.cur.unquoted = \"\"\n\tswitch p.s[0] {\n\tcase '<', '>', '{', '}', ':', '[', ']', ';', ',':\n\t\t// Single symbol\n\t\tp.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]\n\tcase '\"', '\\'':\n\t\t// Quoted string\n\t\ti := 1\n\t\tfor i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\\n' {\n\t\t\tif p.s[i] == '\\\\' && i+1 < len(p.s) {\n\t\t\t\t// skip escaped char\n\t\t\t\ti++\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\tif i >= len(p.s) || p.s[i] != p.s[0] {\n\t\t\tp.errorf(\"unmatched quote\")\n\t\t\treturn\n\t\t}\n\t\tunq, err := unquoteC(p.s[1:i], rune(p.s[0]))\n\t\tif err != nil {\n\t\t\tp.errorf(\"invalid quoted string %v\", p.s[0:i+1])\n\t\t\treturn\n\t\t}\n\t\tp.cur.value, p.s = p.s[0:i+1], 
p.s[i+1:len(p.s)]\n\t\tp.cur.unquoted = unq\n\tdefault:\n\t\ti := 0\n\t\tfor i < len(p.s) && isIdentOrNumberChar(p.s[i]) {\n\t\t\ti++\n\t\t}\n\t\tif i == 0 {\n\t\t\tp.errorf(\"unexpected byte %#x\", p.s[0])\n\t\t\treturn\n\t\t}\n\t\tp.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]\n\t}\n\tp.offset += len(p.cur.value)\n}\n\nvar (\n\terrBadUTF8 = errors.New(\"proto: bad UTF-8\")\n\terrBadHex  = errors.New(\"proto: bad hexadecimal\")\n)\n\nfunc unquoteC(s string, quote rune) (string, error) {\n\t// This is based on C++'s tokenizer.cc.\n\t// Despite its name, this is *not* parsing C syntax.\n\t// For instance, \"\\0\" is an invalid quoted string.\n\n\t// Avoid allocation in trivial cases.\n\tsimple := true\n\tfor _, r := range s {\n\t\tif r == '\\\\' || r == quote {\n\t\t\tsimple = false\n\t\t\tbreak\n\t\t}\n\t}\n\tif simple {\n\t\treturn s, nil\n\t}\n\n\tbuf := make([]byte, 0, 3*len(s)/2)\n\tfor len(s) > 0 {\n\t\tr, n := utf8.DecodeRuneInString(s)\n\t\tif r == utf8.RuneError && n == 1 {\n\t\t\treturn \"\", errBadUTF8\n\t\t}\n\t\ts = s[n:]\n\t\tif r != '\\\\' {\n\t\t\tif r < utf8.RuneSelf {\n\t\t\t\tbuf = append(buf, byte(r))\n\t\t\t} else {\n\t\t\t\tbuf = append(buf, string(r)...)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tch, tail, err := unescape(s)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tbuf = append(buf, ch...)\n\t\ts = tail\n\t}\n\treturn string(buf), nil\n}\n\nfunc unescape(s string) (ch string, tail string, err error) {\n\tr, n := utf8.DecodeRuneInString(s)\n\tif r == utf8.RuneError && n == 1 {\n\t\treturn \"\", \"\", errBadUTF8\n\t}\n\ts = s[n:]\n\tswitch r {\n\tcase 'a':\n\t\treturn \"\\a\", s, nil\n\tcase 'b':\n\t\treturn \"\\b\", s, nil\n\tcase 'f':\n\t\treturn \"\\f\", s, nil\n\tcase 'n':\n\t\treturn \"\\n\", s, nil\n\tcase 'r':\n\t\treturn \"\\r\", s, nil\n\tcase 't':\n\t\treturn \"\\t\", s, nil\n\tcase 'v':\n\t\treturn \"\\v\", s, nil\n\tcase '?':\n\t\treturn \"?\", s, nil // trigraph workaround\n\tcase '\\'', '\"', '\\\\':\n\t\treturn 
string(r), s, nil\n\tcase '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':\n\t\tif len(s) < 2 {\n\t\t\treturn \"\", \"\", fmt.Errorf(`\\%c requires 2 following digits`, r)\n\t\t}\n\t\tbase := 8\n\t\tss := s[:2]\n\t\ts = s[2:]\n\t\tif r == 'x' || r == 'X' {\n\t\t\tbase = 16\n\t\t} else {\n\t\t\tss = string(r) + ss\n\t\t}\n\t\ti, err := strconv.ParseUint(ss, base, 8)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\treturn string([]byte{byte(i)}), s, nil\n\tcase 'u', 'U':\n\t\tn := 4\n\t\tif r == 'U' {\n\t\t\tn = 8\n\t\t}\n\t\tif len(s) < n {\n\t\t\treturn \"\", \"\", fmt.Errorf(`\\%c requires %d digits`, r, n)\n\t\t}\n\n\t\tbs := make([]byte, n/2)\n\t\tfor i := 0; i < n; i += 2 {\n\t\t\ta, ok1 := unhex(s[i])\n\t\t\tb, ok2 := unhex(s[i+1])\n\t\t\tif !ok1 || !ok2 {\n\t\t\t\treturn \"\", \"\", errBadHex\n\t\t\t}\n\t\t\tbs[i/2] = a<<4 | b\n\t\t}\n\t\ts = s[n:]\n\t\treturn string(bs), s, nil\n\t}\n\treturn \"\", \"\", fmt.Errorf(`unknown escape \\%c`, r)\n}\n\n// Adapted from src/pkg/strconv/quote.go.\nfunc unhex(b byte) (v byte, ok bool) {\n\tswitch {\n\tcase '0' <= b && b <= '9':\n\t\treturn b - '0', true\n\tcase 'a' <= b && b <= 'f':\n\t\treturn b - 'a' + 10, true\n\tcase 'A' <= b && b <= 'F':\n\t\treturn b - 'A' + 10, true\n\t}\n\treturn 0, false\n}\n\n// Back off the parser by one token. 
Can only be done between calls to next().\n// It makes the next advance() a no-op.\nfunc (p *textParser) back() { p.backed = true }\n\n// Advances the parser and returns the new current token.\nfunc (p *textParser) next() *token {\n\tif p.backed || p.done {\n\t\tp.backed = false\n\t\treturn &p.cur\n\t}\n\tp.advance()\n\tif p.done {\n\t\tp.cur.value = \"\"\n\t} else if len(p.cur.value) > 0 && p.cur.value[0] == '\"' {\n\t\t// Look for multiple quoted strings separated by whitespace,\n\t\t// and concatenate them.\n\t\tcat := p.cur\n\t\tfor {\n\t\t\tp.skipWhitespace()\n\t\t\tif p.done || p.s[0] != '\"' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tp.advance()\n\t\t\tif p.cur.err != nil {\n\t\t\t\treturn &p.cur\n\t\t\t}\n\t\t\tcat.value += \" \" + p.cur.value\n\t\t\tcat.unquoted += p.cur.unquoted\n\t\t}\n\t\tp.done = false // parser may have seen EOF, but we want to return cat\n\t\tp.cur = cat\n\t}\n\treturn &p.cur\n}\n\nfunc (p *textParser) consumeToken(s string) error {\n\ttok := p.next()\n\tif tok.err != nil {\n\t\treturn tok.err\n\t}\n\tif tok.value != s {\n\t\tp.back()\n\t\treturn p.errorf(\"expected %q, found %q\", s, tok.value)\n\t}\n\treturn nil\n}\n\n// Return a RequiredNotSetError indicating which required field was not set.\nfunc (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {\n\tst := sv.Type()\n\tsprops := GetProperties(st)\n\tfor i := 0; i < st.NumField(); i++ {\n\t\tif !isNil(sv.Field(i)) {\n\t\t\tcontinue\n\t\t}\n\n\t\tprops := sprops.Prop[i]\n\t\tif props.Required {\n\t\t\treturn &RequiredNotSetError{fmt.Sprintf(\"%v.%v\", st, props.OrigName)}\n\t\t}\n\t}\n\treturn &RequiredNotSetError{fmt.Sprintf(\"%v.<unknown field name>\", st)} // should not happen\n}\n\n// Returns the index in the struct for the named field, as well as the parsed tag properties.\nfunc structFieldByName(st reflect.Type, name string) (int, *Properties, bool) {\n\tsprops := GetProperties(st)\n\ti, ok := sprops.decoderOrigNames[name]\n\tif ok {\n\t\treturn i, 
sprops.Prop[i], true\n\t}\n\treturn -1, nil, false\n}\n\n// Consume a ':' from the input stream (if the next token is a colon),\n// returning an error if a colon is needed but not present.\nfunc (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {\n\ttok := p.next()\n\tif tok.err != nil {\n\t\treturn tok.err\n\t}\n\tif tok.value != \":\" {\n\t\t// Colon is optional when the field is a group or message.\n\t\tneedColon := true\n\t\tswitch props.Wire {\n\t\tcase \"group\":\n\t\t\tneedColon = false\n\t\tcase \"bytes\":\n\t\t\t// A \"bytes\" field is either a message, a string, or a repeated field;\n\t\t\t// those three become *T, *string and []T respectively, so we can check for\n\t\t\t// this field being a pointer to a non-string.\n\t\t\tif typ.Kind() == reflect.Ptr {\n\t\t\t\t// *T or *string\n\t\t\t\tif typ.Elem().Kind() == reflect.String {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else if typ.Kind() == reflect.Slice {\n\t\t\t\t// []T or []*T\n\t\t\t\tif typ.Elem().Kind() != reflect.Ptr {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else if typ.Kind() == reflect.String {\n\t\t\t\t// The proto3 exception is for a string field,\n\t\t\t\t// which requires a colon.\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tneedColon = false\n\t\t}\n\t\tif needColon {\n\t\t\treturn p.errorf(\"expected ':', found %q\", tok.value)\n\t\t}\n\t\tp.back()\n\t}\n\treturn nil\n}\n\nfunc (p *textParser) readStruct(sv reflect.Value, terminator string) error {\n\tst := sv.Type()\n\treqCount := GetProperties(st).reqCount\n\tvar reqFieldErr error\n\tfieldSet := make(map[string]bool)\n\t// A struct is a sequence of \"name: value\", terminated by one of\n\t// '>' or '}', or the end of the input.  
A name may also be\n\t// \"[extension]\".\n\tfor {\n\t\ttok := p.next()\n\t\tif tok.err != nil {\n\t\t\treturn tok.err\n\t\t}\n\t\tif tok.value == terminator {\n\t\t\tbreak\n\t\t}\n\t\tif tok.value == \"[\" {\n\t\t\t// Looks like an extension.\n\t\t\t//\n\t\t\t// TODO: Check whether we need to handle\n\t\t\t// namespace rooted names (e.g. \".something.Foo\").\n\t\t\ttok = p.next()\n\t\t\tif tok.err != nil {\n\t\t\t\treturn tok.err\n\t\t\t}\n\t\t\tvar desc *ExtensionDesc\n\t\t\t// This could be faster, but it's functional.\n\t\t\t// TODO: Do something smarter than a linear scan.\n\t\t\tfor _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {\n\t\t\t\tif d.Name == tok.value {\n\t\t\t\t\tdesc = d\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif desc == nil {\n\t\t\t\treturn p.errorf(\"unrecognized extension %q\", tok.value)\n\t\t\t}\n\t\t\t// Check the extension terminator.\n\t\t\ttok = p.next()\n\t\t\tif tok.err != nil {\n\t\t\t\treturn tok.err\n\t\t\t}\n\t\t\tif tok.value != \"]\" {\n\t\t\t\treturn p.errorf(\"unrecognized extension terminator %q\", tok.value)\n\t\t\t}\n\n\t\t\tprops := &Properties{}\n\t\t\tprops.Parse(desc.Tag)\n\n\t\t\ttyp := reflect.TypeOf(desc.ExtensionType)\n\t\t\tif err := p.checkForColon(props, typ); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\trep := desc.repeated()\n\n\t\t\t// Read the extension structure, and set it in\n\t\t\t// the value we're constructing.\n\t\t\tvar ext reflect.Value\n\t\t\tif !rep {\n\t\t\t\text = reflect.New(typ).Elem()\n\t\t\t} else {\n\t\t\t\text = reflect.New(typ.Elem()).Elem()\n\t\t\t}\n\t\t\tif err := p.readAny(ext, props); err != nil {\n\t\t\t\tif _, ok := err.(*RequiredNotSetError); !ok {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treqFieldErr = err\n\t\t\t}\n\t\t\tep := sv.Addr().Interface().(extendableProto)\n\t\t\tif !rep {\n\t\t\t\tSetExtension(ep, desc, ext.Interface())\n\t\t\t} else {\n\t\t\t\told, err := GetExtension(ep, desc)\n\t\t\t\tvar sl reflect.Value\n\t\t\t\tif err == nil 
{\n\t\t\t\t\tsl = reflect.ValueOf(old) // existing slice\n\t\t\t\t} else {\n\t\t\t\t\tsl = reflect.MakeSlice(typ, 0, 1)\n\t\t\t\t}\n\t\t\t\tsl = reflect.Append(sl, ext)\n\t\t\t\tSetExtension(ep, desc, sl.Interface())\n\t\t\t}\n\t\t} else {\n\t\t\t// This is a normal, non-extension field.\n\t\t\tname := tok.value\n\t\t\tfi, props, ok := structFieldByName(st, name)\n\t\t\tif !ok {\n\t\t\t\treturn p.errorf(\"unknown field name %q in %v\", name, st)\n\t\t\t}\n\n\t\t\tdst := sv.Field(fi)\n\n\t\t\tif dst.Kind() == reflect.Map {\n\t\t\t\t// Consume any colon.\n\t\t\t\tif err := p.checkForColon(props, dst.Type()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// Construct the map if it doesn't already exist.\n\t\t\t\tif dst.IsNil() {\n\t\t\t\t\tdst.Set(reflect.MakeMap(dst.Type()))\n\t\t\t\t}\n\t\t\t\tkey := reflect.New(dst.Type().Key()).Elem()\n\t\t\t\tval := reflect.New(dst.Type().Elem()).Elem()\n\n\t\t\t\t// The map entry should be this sequence of tokens:\n\t\t\t\t//\t< key : KEY value : VALUE >\n\t\t\t\t// Technically the \"key\" and \"value\" could come in any order,\n\t\t\t\t// but in practice they won't.\n\n\t\t\t\ttok := p.next()\n\t\t\t\tvar terminator string\n\t\t\t\tswitch tok.value {\n\t\t\t\tcase \"<\":\n\t\t\t\t\tterminator = \">\"\n\t\t\t\tcase \"{\":\n\t\t\t\t\tterminator = \"}\"\n\t\t\t\tdefault:\n\t\t\t\t\treturn p.errorf(\"expected '{' or '<', found %q\", tok.value)\n\t\t\t\t}\n\t\t\t\tif err := p.consumeToken(\"key\"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := p.consumeToken(\":\"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := p.readAny(key, props.mkeyprop); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := p.consumeOptionalSeparator(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := p.consumeToken(\"value\"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {\n\t\t\t\t\treturn 
err\n\t\t\t\t}\n\t\t\t\tif err := p.readAny(val, props.mvalprop); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := p.consumeOptionalSeparator(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := p.consumeToken(terminator); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tdst.SetMapIndex(key, val)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Check that it's not already set if it's not a repeated field.\n\t\t\tif !props.Repeated && fieldSet[name] {\n\t\t\t\treturn p.errorf(\"non-repeated field %q was repeated\", name)\n\t\t\t}\n\n\t\t\tif err := p.checkForColon(props, st.Field(fi).Type); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Parse into the field.\n\t\t\tfieldSet[name] = true\n\t\t\tif err := p.readAny(dst, props); err != nil {\n\t\t\t\tif _, ok := err.(*RequiredNotSetError); !ok {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treqFieldErr = err\n\t\t\t} else if props.Required {\n\t\t\t\treqCount--\n\t\t\t}\n\t\t}\n\n\t\tif err := p.consumeOptionalSeparator(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tif reqCount > 0 {\n\t\treturn p.missingRequiredFieldError(sv)\n\t}\n\treturn reqFieldErr\n}\n\n// consumeOptionalSeparator consumes an optional semicolon or comma.\n// It is used in readStruct to provide backward compatibility.\nfunc (p *textParser) consumeOptionalSeparator() error {\n\ttok := p.next()\n\tif tok.err != nil {\n\t\treturn tok.err\n\t}\n\tif tok.value != \";\" && tok.value != \",\" {\n\t\tp.back()\n\t}\n\treturn nil\n}\n\nfunc (p *textParser) readAny(v reflect.Value, props *Properties) error {\n\ttok := p.next()\n\tif tok.err != nil {\n\t\treturn tok.err\n\t}\n\tif tok.value == \"\" {\n\t\treturn p.errorf(\"unexpected EOF\")\n\t}\n\n\tswitch fv := v; fv.Kind() {\n\tcase reflect.Slice:\n\t\tat := v.Type()\n\t\tif at.Elem().Kind() == reflect.Uint8 {\n\t\t\t// Special case for []byte\n\t\t\tif tok.value[0] != '\"' && tok.value[0] != '\\'' {\n\t\t\t\t// Deliberately written out here, as the error 
after\n\t\t\t\t// this switch statement would write \"invalid []byte: ...\",\n\t\t\t\t// which is not as user-friendly.\n\t\t\t\treturn p.errorf(\"invalid string: %v\", tok.value)\n\t\t\t}\n\t\t\tbytes := []byte(tok.unquoted)\n\t\t\tfv.Set(reflect.ValueOf(bytes))\n\t\t\treturn nil\n\t\t}\n\t\t// Repeated field. May already exist.\n\t\tflen := fv.Len()\n\t\tif flen == fv.Cap() {\n\t\t\tnav := reflect.MakeSlice(at, flen, 2*flen+1)\n\t\t\treflect.Copy(nav, fv)\n\t\t\tfv.Set(nav)\n\t\t}\n\t\tfv.SetLen(flen + 1)\n\n\t\t// Read one.\n\t\tp.back()\n\t\treturn p.readAny(fv.Index(flen), props)\n\tcase reflect.Bool:\n\t\t// Either \"true\", \"false\", 1 or 0.\n\t\tswitch tok.value {\n\t\tcase \"true\", \"1\":\n\t\t\tfv.SetBool(true)\n\t\t\treturn nil\n\t\tcase \"false\", \"0\":\n\t\t\tfv.SetBool(false)\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Float32, reflect.Float64:\n\t\tv := tok.value\n\t\t// Ignore 'f' for compatibility with output generated by C++, but don't\n\t\t// remove 'f' when the value is \"-inf\" or \"inf\".\n\t\tif strings.HasSuffix(v, \"f\") && tok.value != \"-inf\" && tok.value != \"inf\" {\n\t\t\tv = v[:len(v)-1]\n\t\t}\n\t\tif f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {\n\t\t\tfv.SetFloat(f)\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Int32:\n\t\tif x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {\n\t\t\tfv.SetInt(x)\n\t\t\treturn nil\n\t\t}\n\n\t\tif len(props.Enum) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tm, ok := enumValueMaps[props.Enum]\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tx, ok := m[tok.value]\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tfv.SetInt(int64(x))\n\t\treturn nil\n\tcase reflect.Int64:\n\t\tif x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {\n\t\t\tfv.SetInt(x)\n\t\t\treturn nil\n\t\t}\n\n\tcase reflect.Ptr:\n\t\t// A basic field (indirected through pointer), or a repeated message/group\n\t\tp.back()\n\t\tfv.Set(reflect.New(fv.Type().Elem()))\n\t\treturn p.readAny(fv.Elem(), props)\n\tcase reflect.String:\n\t\tif 
tok.value[0] == '\"' || tok.value[0] == '\\'' {\n\t\t\tfv.SetString(tok.unquoted)\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Struct:\n\t\tvar terminator string\n\t\tswitch tok.value {\n\t\tcase \"{\":\n\t\t\tterminator = \"}\"\n\t\tcase \"<\":\n\t\t\tterminator = \">\"\n\t\tdefault:\n\t\t\treturn p.errorf(\"expected '{' or '<', found %q\", tok.value)\n\t\t}\n\t\t// TODO: Handle nested messages which implement encoding.TextUnmarshaler.\n\t\treturn p.readStruct(fv, terminator)\n\tcase reflect.Uint32:\n\t\tif x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {\n\t\t\tfv.SetUint(uint64(x))\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Uint64:\n\t\tif x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {\n\t\t\tfv.SetUint(x)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn p.errorf(\"invalid %v: %v\", v.Type(), tok.value)\n}\n\n// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb\n// before starting to unmarshal, so any existing data in pb is always removed.\n// If a required field is not set and no other error occurs,\n// UnmarshalText returns *RequiredNotSetError.\nfunc UnmarshalText(s string, pb Message) error {\n\tif um, ok := pb.(encoding.TextUnmarshaler); ok {\n\t\terr := um.UnmarshalText([]byte(s))\n\t\treturn err\n\t}\n\tpb.Reset()\n\tv := reflect.ValueOf(pb)\n\tif pe := newTextParser(s).readStruct(v.Elem(), \"\"); pe != nil {\n\t\treturn pe\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/text_parser_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"math\"\n\t\"reflect\"\n\t\"testing\"\n\n\t. \"github.com/golang/protobuf/proto\"\n\tproto3pb \"github.com/golang/protobuf/proto/proto3_proto\"\n\t. 
\"github.com/golang/protobuf/proto/testdata\"\n)\n\ntype UnmarshalTextTest struct {\n\tin  string\n\terr string // if \"\", no error expected\n\tout *MyMessage\n}\n\nfunc buildExtStructTest(text string) UnmarshalTextTest {\n\tmsg := &MyMessage{\n\t\tCount: Int32(42),\n\t}\n\tSetExtension(msg, E_Ext_More, &Ext{\n\t\tData: String(\"Hello, world!\"),\n\t})\n\treturn UnmarshalTextTest{in: text, out: msg}\n}\n\nfunc buildExtDataTest(text string) UnmarshalTextTest {\n\tmsg := &MyMessage{\n\t\tCount: Int32(42),\n\t}\n\tSetExtension(msg, E_Ext_Text, String(\"Hello, world!\"))\n\tSetExtension(msg, E_Ext_Number, Int32(1729))\n\treturn UnmarshalTextTest{in: text, out: msg}\n}\n\nfunc buildExtRepStringTest(text string) UnmarshalTextTest {\n\tmsg := &MyMessage{\n\t\tCount: Int32(42),\n\t}\n\tif err := SetExtension(msg, E_Greeting, []string{\"bula\", \"hola\"}); err != nil {\n\t\tpanic(err)\n\t}\n\treturn UnmarshalTextTest{in: text, out: msg}\n}\n\nvar unMarshalTextTests = []UnmarshalTextTest{\n\t// Basic\n\t{\n\t\tin: \" count:42\\n  name:\\\"Dave\\\" \",\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"Dave\"),\n\t\t},\n\t},\n\n\t// Empty quoted string\n\t{\n\t\tin: `count:42 name:\"\"`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"\"),\n\t\t},\n\t},\n\n\t// Quoted string concatenation\n\t{\n\t\tin: `count:42 name: \"My name is \"` + \"\\n\" + `\"elsewhere\"`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"My name is elsewhere\"),\n\t\t},\n\t},\n\n\t// Quoted string with escaped apostrophe\n\t{\n\t\tin: `count:42 name: \"HOLIDAY - New Year\\'s Day\"`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"HOLIDAY - New Year's Day\"),\n\t\t},\n\t},\n\n\t// Quoted string with single quote\n\t{\n\t\tin: `count:42 name: 'Roger \"The Ramster\" Ramjet'`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(`Roger \"The Ramster\" Ramjet`),\n\t\t},\n\t},\n\n\t// Quoted string with all 
the accepted special characters from the C++ test\n\t{\n\t\tin: `count:42 name: ` + \"\\\"\\\\\\\"A string with \\\\' characters \\\\n and \\\\r newlines and \\\\t tabs and \\\\001 slashes \\\\\\\\ and  multiple   spaces\\\"\",\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and  multiple   spaces\"),\n\t\t},\n\t},\n\n\t// Quoted string with quoted backslash\n\t{\n\t\tin: `count:42 name: \"\\\\'xyz\"`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(`\\'xyz`),\n\t\t},\n\t},\n\n\t// Quoted string with UTF-8 bytes.\n\t{\n\t\tin: \"count:42 name: '\\303\\277\\302\\201\\xAB'\",\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"\\303\\277\\302\\201\\xAB\"),\n\t\t},\n\t},\n\n\t// Bad quoted string\n\t{\n\t\tin:  `inner: < host: \"\\0\" >` + \"\\n\",\n\t\terr: `line 1.15: invalid quoted string \"\\0\"`,\n\t},\n\n\t// Number too large for int64\n\t{\n\t\tin:  \"count: 1 others { key: 123456789012345678901 }\",\n\t\terr: \"line 1.23: invalid int64: 123456789012345678901\",\n\t},\n\n\t// Number too large for int32\n\t{\n\t\tin:  \"count: 1234567890123\",\n\t\terr: \"line 1.7: invalid int32: 1234567890123\",\n\t},\n\n\t// Number in hexadecimal\n\t{\n\t\tin: \"count: 0x2beef\",\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(0x2beef),\n\t\t},\n\t},\n\n\t// Number in octal\n\t{\n\t\tin: \"count: 024601\",\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(024601),\n\t\t},\n\t},\n\n\t// Floating point number with \"f\" suffix\n\t{\n\t\tin: \"count: 4 others:< weight: 17.0f >\",\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(4),\n\t\t\tOthers: []*OtherMessage{\n\t\t\t\t{\n\t\t\t\t\tWeight: Float32(17),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\n\t// Floating point positive infinity\n\t{\n\t\tin: \"count: 4 bigfloat: inf\",\n\t\tout: &MyMessage{\n\t\t\tCount:    Int32(4),\n\t\t\tBigfloat: Float64(math.Inf(1)),\n\t\t},\n\t},\n\n\t// Floating point negative 
infinity\n\t{\n\t\tin: \"count: 4 bigfloat: -inf\",\n\t\tout: &MyMessage{\n\t\t\tCount:    Int32(4),\n\t\t\tBigfloat: Float64(math.Inf(-1)),\n\t\t},\n\t},\n\n\t// Number too large for float32\n\t{\n\t\tin:  \"others:< weight: 12345678901234567890123456789012345678901234567890 >\",\n\t\terr: \"line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890\",\n\t},\n\n\t// Number posing as a quoted string\n\t{\n\t\tin:  `inner: < host: 12 >` + \"\\n\",\n\t\terr: `line 1.15: invalid string: 12`,\n\t},\n\n\t// Quoted string posing as int32\n\t{\n\t\tin:  `count: \"12\"`,\n\t\terr: `line 1.7: invalid int32: \"12\"`,\n\t},\n\n\t// Quoted string posing a float32\n\t{\n\t\tin:  `others:< weight: \"17.4\" >`,\n\t\terr: `line 1.17: invalid float32: \"17.4\"`,\n\t},\n\n\t// Enum\n\t{\n\t\tin: `count:42 bikeshed: BLUE`,\n\t\tout: &MyMessage{\n\t\t\tCount:    Int32(42),\n\t\t\tBikeshed: MyMessage_BLUE.Enum(),\n\t\t},\n\t},\n\n\t// Repeated field\n\t{\n\t\tin: `count:42 pet: \"horsey\" pet:\"bunny\"`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tPet:   []string{\"horsey\", \"bunny\"},\n\t\t},\n\t},\n\n\t// Repeated message with/without colon and <>/{}\n\t{\n\t\tin: `count:42 others:{} others{} others:<> others:{}`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tOthers: []*OtherMessage{\n\t\t\t\t{},\n\t\t\t\t{},\n\t\t\t\t{},\n\t\t\t\t{},\n\t\t\t},\n\t\t},\n\t},\n\n\t// Missing colon for inner message\n\t{\n\t\tin: `count:42 inner < host: \"cauchy.syd\" >`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tInner: &InnerMessage{\n\t\t\t\tHost: String(\"cauchy.syd\"),\n\t\t\t},\n\t\t},\n\t},\n\n\t// Missing colon for string field\n\t{\n\t\tin:  `name \"Dave\"`,\n\t\terr: `line 1.5: expected ':', found \"\\\"Dave\\\"\"`,\n\t},\n\n\t// Missing colon for int32 field\n\t{\n\t\tin:  `count 42`,\n\t\terr: `line 1.6: expected ':', found \"42\"`,\n\t},\n\n\t// Missing required field\n\t{\n\t\tin:  `name: \"Pawel\"`,\n\t\terr: `proto: required field 
\"testdata.MyMessage.count\" not set`,\n\t\tout: &MyMessage{\n\t\t\tName: String(\"Pawel\"),\n\t\t},\n\t},\n\n\t// Repeated non-repeated field\n\t{\n\t\tin:  `name: \"Rob\" name: \"Russ\"`,\n\t\terr: `line 1.12: non-repeated field \"name\" was repeated`,\n\t},\n\n\t// Group\n\t{\n\t\tin: `count: 17 SomeGroup { group_field: 12 }`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(17),\n\t\t\tSomegroup: &MyMessage_SomeGroup{\n\t\t\t\tGroupField: Int32(12),\n\t\t\t},\n\t\t},\n\t},\n\n\t// Semicolon between fields\n\t{\n\t\tin: `count:3;name:\"Calvin\"`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(3),\n\t\t\tName:  String(\"Calvin\"),\n\t\t},\n\t},\n\t// Comma between fields\n\t{\n\t\tin: `count:4,name:\"Ezekiel\"`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(4),\n\t\t\tName:  String(\"Ezekiel\"),\n\t\t},\n\t},\n\n\t// Extension\n\tbuildExtStructTest(`count: 42 [testdata.Ext.more]:<data:\"Hello, world!\" >`),\n\tbuildExtStructTest(`count: 42 [testdata.Ext.more] {data:\"Hello, world!\"}`),\n\tbuildExtDataTest(`count: 42 [testdata.Ext.text]:\"Hello, world!\" [testdata.Ext.number]:1729`),\n\tbuildExtRepStringTest(`count: 42 [testdata.greeting]:\"bula\" [testdata.greeting]:\"hola\"`),\n\n\t// Big all-in-one\n\t{\n\t\tin: \"count:42  # Meaning\\n\" +\n\t\t\t`name:\"Dave\" ` +\n\t\t\t`quote:\"\\\"I didn't want to go.\\\"\" ` +\n\t\t\t`pet:\"bunny\" ` +\n\t\t\t`pet:\"kitty\" ` +\n\t\t\t`pet:\"horsey\" ` +\n\t\t\t`inner:<` +\n\t\t\t`  host:\"footrest.syd\" ` +\n\t\t\t`  port:7001 ` +\n\t\t\t`  connected:true ` +\n\t\t\t`> ` +\n\t\t\t`others:<` +\n\t\t\t`  key:3735928559 ` +\n\t\t\t`  value:\"\\x01A\\a\\f\" ` +\n\t\t\t`> ` +\n\t\t\t`others:<` +\n\t\t\t\"  weight:58.9  # Atomic weight of Co\\n\" +\n\t\t\t`  inner:<` +\n\t\t\t`    host:\"lesha.mtv\" ` +\n\t\t\t`    port:8002 ` +\n\t\t\t`  >` +\n\t\t\t`>`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"Dave\"),\n\t\t\tQuote: String(`\"I didn't want to go.\"`),\n\t\t\tPet:   []string{\"bunny\", \"kitty\", 
\"horsey\"},\n\t\t\tInner: &InnerMessage{\n\t\t\t\tHost:      String(\"footrest.syd\"),\n\t\t\t\tPort:      Int32(7001),\n\t\t\t\tConnected: Bool(true),\n\t\t\t},\n\t\t\tOthers: []*OtherMessage{\n\t\t\t\t{\n\t\t\t\t\tKey:   Int64(3735928559),\n\t\t\t\t\tValue: []byte{0x1, 'A', '\\a', '\\f'},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tWeight: Float32(58.9),\n\t\t\t\t\tInner: &InnerMessage{\n\t\t\t\t\t\tHost: String(\"lesha.mtv\"),\n\t\t\t\t\t\tPort: Int32(8002),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestUnmarshalText(t *testing.T) {\n\tfor i, test := range unMarshalTextTests {\n\t\tpb := new(MyMessage)\n\t\terr := UnmarshalText(test.in, pb)\n\t\tif test.err == \"\" {\n\t\t\t// We don't expect failure.\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Test %d: Unexpected error: %v\", i, err)\n\t\t\t} else if !reflect.DeepEqual(pb, test.out) {\n\t\t\t\tt.Errorf(\"Test %d: Incorrect populated \\nHave: %v\\nWant: %v\",\n\t\t\t\t\ti, pb, test.out)\n\t\t\t}\n\t\t} else {\n\t\t\t// We do expect failure.\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"Test %d: Didn't get expected error: %v\", i, test.err)\n\t\t\t} else if err.Error() != test.err {\n\t\t\t\tt.Errorf(\"Test %d: Incorrect error.\\nHave: %v\\nWant: %v\",\n\t\t\t\t\ti, err.Error(), test.err)\n\t\t\t} else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) {\n\t\t\t\tt.Errorf(\"Test %d: Incorrect populated \\nHave: %v\\nWant: %v\",\n\t\t\t\t\ti, pb, test.out)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestUnmarshalTextCustomMessage(t *testing.T) {\n\tmsg := &textMessage{}\n\tif err := UnmarshalText(\"custom\", msg); err != nil {\n\t\tt.Errorf(\"Unexpected error from custom unmarshal: %v\", err)\n\t}\n\tif UnmarshalText(\"not custom\", msg) == nil {\n\t\tt.Errorf(\"Didn't get expected error from custom unmarshal\")\n\t}\n}\n\n// Regression test; this caused a panic.\nfunc TestRepeatedEnum(t *testing.T) {\n\tpb := new(RepeatedEnum)\n\tif err := UnmarshalText(\"color: RED\", 
pb); err != nil {\n\t\tt.Fatal(err)\n\t}\n\texp := &RepeatedEnum{\n\t\tColor: []RepeatedEnum_Color{RepeatedEnum_RED},\n\t}\n\tif !Equal(pb, exp) {\n\t\tt.Errorf(\"Incorrect populated \\nHave: %v\\nWant: %v\", pb, exp)\n\t}\n}\n\nfunc TestProto3TextParsing(t *testing.T) {\n\tm := new(proto3pb.Message)\n\tconst in = `name: \"Wallace\" true_scotsman: true`\n\twant := &proto3pb.Message{\n\t\tName:         \"Wallace\",\n\t\tTrueScotsman: true,\n\t}\n\tif err := UnmarshalText(in, m); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !Equal(m, want) {\n\t\tt.Errorf(\"\\n got %v\\nwant %v\", m, want)\n\t}\n}\n\nfunc TestMapParsing(t *testing.T) {\n\tm := new(MessageWithMap)\n\tconst in = `name_mapping:<key:1234 value:\"Feist\"> name_mapping:<key:1 value:\"Beatles\">` +\n\t\t`msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay\n\t\t`msg_mapping<key:-2 value<f: 4.0>>` + // no colon after \"value\"\n\t\t`byte_mapping:<key:true value:\"so be it\">`\n\twant := &MessageWithMap{\n\t\tNameMapping: map[int32]string{\n\t\t\t1:    \"Beatles\",\n\t\t\t1234: \"Feist\",\n\t\t},\n\t\tMsgMapping: map[int64]*FloatingPoint{\n\t\t\t-4: {F: Float64(2.0)},\n\t\t\t-2: {F: Float64(4.0)},\n\t\t},\n\t\tByteMapping: map[bool][]byte{\n\t\t\ttrue: []byte(\"so be it\"),\n\t\t},\n\t}\n\tif err := UnmarshalText(in, m); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !Equal(m, want) {\n\t\tt.Errorf(\"\\n got %v\\nwant %v\", m, want)\n\t}\n}\n\nvar benchInput string\n\nfunc init() {\n\tbenchInput = \"count: 4\\n\"\n\tfor i := 0; i < 1000; i++ {\n\t\tbenchInput += \"pet: \\\"fido\\\"\\n\"\n\t}\n\n\t// Check it is valid input.\n\tpb := new(MyMessage)\n\terr := UnmarshalText(benchInput, pb)\n\tif err != nil {\n\t\tpanic(\"Bad benchmark input: \" + err.Error())\n\t}\n}\n\nfunc BenchmarkUnmarshalText(b *testing.B) {\n\tpb := new(MyMessage)\n\tfor i := 0; i < b.N; i++ {\n\t\tUnmarshalText(benchInput, pb)\n\t}\n\tb.SetBytes(int64(len(benchInput)))\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/text_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io/ioutil\"\n\t\"math\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\tproto3pb \"github.com/golang/protobuf/proto/proto3_proto\"\n\tpb \"github.com/golang/protobuf/proto/testdata\"\n)\n\n// textMessage implements the methods that allow it to marshal and unmarshal\n// itself as text.\ntype textMessage struct {\n}\n\nfunc (*textMessage) MarshalText() ([]byte, error) {\n\treturn []byte(\"custom\"), nil\n}\n\nfunc (*textMessage) UnmarshalText(bytes []byte) error {\n\tif string(bytes) != \"custom\" {\n\t\treturn errors.New(\"expected 'custom'\")\n\t}\n\treturn nil\n}\n\nfunc (*textMessage) Reset()         {}\nfunc (*textMessage) String() string { return \"\" }\nfunc (*textMessage) ProtoMessage()  {}\n\nfunc newTestMessage() *pb.MyMessage {\n\tmsg := &pb.MyMessage{\n\t\tCount: proto.Int32(42),\n\t\tName:  proto.String(\"Dave\"),\n\t\tQuote: proto.String(`\"I didn't want to go.\"`),\n\t\tPet:   []string{\"bunny\", \"kitty\", \"horsey\"},\n\t\tInner: &pb.InnerMessage{\n\t\t\tHost:      proto.String(\"footrest.syd\"),\n\t\t\tPort:      proto.Int32(7001),\n\t\t\tConnected: proto.Bool(true),\n\t\t},\n\t\tOthers: []*pb.OtherMessage{\n\t\t\t{\n\t\t\t\tKey:   proto.Int64(0xdeadbeef),\n\t\t\t\tValue: []byte{1, 65, 7, 12},\n\t\t\t},\n\t\t\t{\n\t\t\t\tWeight: proto.Float32(6.022),\n\t\t\t\tInner: &pb.InnerMessage{\n\t\t\t\t\tHost: 
proto.String(\"lesha.mtv\"),\n\t\t\t\t\tPort: proto.Int32(8002),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tBikeshed: pb.MyMessage_BLUE.Enum(),\n\t\tSomegroup: &pb.MyMessage_SomeGroup{\n\t\t\tGroupField: proto.Int32(8),\n\t\t},\n\t\t// One normally wouldn't do this.\n\t\t// This is an undeclared tag 13, as a varint (wire type 0) with value 4.\n\t\tXXX_unrecognized: []byte{13<<3 | 0, 4},\n\t}\n\text := &pb.Ext{\n\t\tData: proto.String(\"Big gobs for big rats\"),\n\t}\n\tif err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {\n\t\tpanic(err)\n\t}\n\tgreetings := []string{\"adg\", \"easy\", \"cow\"}\n\tif err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Add an unknown extension. We marshal a pb.Ext, and fake the ID.\n\tb, err := proto.Marshal(&pb.Ext{Data: proto.String(\"3G skiing\")})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tb = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)\n\tproto.SetRawExtension(msg, 201, b)\n\n\t// Extensions can be plain fields, too, so let's test that.\n\tb = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)\n\tproto.SetRawExtension(msg, 202, b)\n\n\treturn msg\n}\n\nconst text = `count: 42\nname: \"Dave\"\nquote: \"\\\"I didn't want to go.\\\"\"\npet: \"bunny\"\npet: \"kitty\"\npet: \"horsey\"\ninner: <\n  host: \"footrest.syd\"\n  port: 7001\n  connected: true\n>\nothers: <\n  key: 3735928559\n  value: \"\\001A\\007\\014\"\n>\nothers: <\n  weight: 6.022\n  inner: <\n    host: \"lesha.mtv\"\n    port: 8002\n  >\n>\nbikeshed: BLUE\nSomeGroup {\n  group_field: 8\n}\n/* 2 unknown bytes */\n13: 4\n[testdata.Ext.more]: <\n  data: \"Big gobs for big rats\"\n>\n[testdata.greeting]: \"adg\"\n[testdata.greeting]: \"easy\"\n[testdata.greeting]: \"cow\"\n/* 13 unknown bytes */\n201: \"\\t3G skiing\"\n/* 3 unknown bytes */\n202: 19\n`\n\nfunc TestMarshalText(t *testing.T) {\n\tbuf := new(bytes.Buffer)\n\tif err := proto.MarshalText(buf, newTestMessage()); err != nil 
{\n\t\tt.Fatalf(\"proto.MarshalText: %v\", err)\n\t}\n\ts := buf.String()\n\tif s != text {\n\t\tt.Errorf(\"Got:\\n===\\n%v===\\nExpected:\\n===\\n%v===\\n\", s, text)\n\t}\n}\n\nfunc TestMarshalTextCustomMessage(t *testing.T) {\n\tbuf := new(bytes.Buffer)\n\tif err := proto.MarshalText(buf, &textMessage{}); err != nil {\n\t\tt.Fatalf(\"proto.MarshalText: %v\", err)\n\t}\n\ts := buf.String()\n\tif s != \"custom\" {\n\t\tt.Errorf(\"Got %q, expected %q\", s, \"custom\")\n\t}\n}\nfunc TestMarshalTextNil(t *testing.T) {\n\twant := \"<nil>\"\n\ttests := []proto.Message{nil, (*pb.MyMessage)(nil)}\n\tfor i, test := range tests {\n\t\tbuf := new(bytes.Buffer)\n\t\tif err := proto.MarshalText(buf, test); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif got := buf.String(); got != want {\n\t\t\tt.Errorf(\"%d: got %q want %q\", i, got, want)\n\t\t}\n\t}\n}\n\nfunc TestMarshalTextUnknownEnum(t *testing.T) {\n\t// The Color enum only specifies values 0-2.\n\tm := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}\n\tgot := m.String()\n\tconst want = `bikeshed:3 `\n\tif got != want {\n\t\tt.Errorf(\"\\n got %q\\nwant %q\", got, want)\n\t}\n}\n\nfunc BenchmarkMarshalTextBuffered(b *testing.B) {\n\tbuf := new(bytes.Buffer)\n\tm := newTestMessage()\n\tfor i := 0; i < b.N; i++ {\n\t\tbuf.Reset()\n\t\tproto.MarshalText(buf, m)\n\t}\n}\n\nfunc BenchmarkMarshalTextUnbuffered(b *testing.B) {\n\tw := ioutil.Discard\n\tm := newTestMessage()\n\tfor i := 0; i < b.N; i++ {\n\t\tproto.MarshalText(w, m)\n\t}\n}\n\nfunc compact(src string) string {\n\t// s/[ \\n]+/ /g; s/ $//;\n\tdst := make([]byte, len(src))\n\tspace, comment := false, false\n\tj := 0\n\tfor i := 0; i < len(src); i++ {\n\t\tif strings.HasPrefix(src[i:], \"/*\") {\n\t\t\tcomment = true\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\t\tif comment && strings.HasPrefix(src[i:], \"*/\") {\n\t\t\tcomment = false\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\t\tif comment {\n\t\t\tcontinue\n\t\t}\n\t\tc := src[i]\n\t\tif c == ' ' || c == '\\n' 
{\n\t\t\tspace = true\n\t\t\tcontinue\n\t\t}\n\t\tif j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {\n\t\t\tspace = false\n\t\t}\n\t\tif c == '{' {\n\t\t\tspace = false\n\t\t}\n\t\tif space {\n\t\t\tdst[j] = ' '\n\t\t\tj++\n\t\t\tspace = false\n\t\t}\n\t\tdst[j] = c\n\t\tj++\n\t}\n\tif space {\n\t\tdst[j] = ' '\n\t\tj++\n\t}\n\treturn string(dst[0:j])\n}\n\nvar compactText = compact(text)\n\nfunc TestCompactText(t *testing.T) {\n\ts := proto.CompactTextString(newTestMessage())\n\tif s != compactText {\n\t\tt.Errorf(\"Got:\\n===\\n%v===\\nExpected:\\n===\\n%v\\n===\\n\", s, compactText)\n\t}\n}\n\nfunc TestStringEscaping(t *testing.T) {\n\ttestCases := []struct {\n\t\tin  *pb.Strings\n\t\tout string\n\t}{\n\t\t{\n\t\t\t// Test data from C++ test (TextFormatTest.StringEscape).\n\t\t\t// Single divergence: we don't escape apostrophes.\n\t\t\t&pb.Strings{StringField: proto.String(\"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and  multiple   spaces\")},\n\t\t\t\"string_field: \\\"\\\\\\\"A string with ' characters \\\\n and \\\\r newlines and \\\\t tabs and \\\\001 slashes \\\\\\\\ and  multiple   spaces\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\t// Test data from the same C++ test.\n\t\t\t&pb.Strings{StringField: proto.String(\"\\350\\260\\267\\346\\255\\214\")},\n\t\t\t\"string_field: \\\"\\\\350\\\\260\\\\267\\\\346\\\\255\\\\214\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\t// Some UTF-8.\n\t\t\t&pb.Strings{StringField: proto.String(\"\\x00\\x01\\xff\\x81\")},\n\t\t\t`string_field: \"\\000\\001\\377\\201\"` + \"\\n\",\n\t\t},\n\t}\n\n\tfor i, tc := range testCases {\n\t\tvar buf bytes.Buffer\n\t\tif err := proto.MarshalText(&buf, tc.in); err != nil {\n\t\t\tt.Errorf(\"proto.MarsalText: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\ts := buf.String()\n\t\tif s != tc.out {\n\t\t\tt.Errorf(\"#%d: Got:\\n%s\\nExpected:\\n%s\\n\", i, s, tc.out)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check round-trip.\n\t\tpb := new(pb.Strings)\n\t\tif err := 
proto.UnmarshalText(s, pb); err != nil {\n\t\t\tt.Errorf(\"#%d: UnmarshalText: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !proto.Equal(pb, tc.in) {\n\t\t\tt.Errorf(\"#%d: Round-trip failed:\\nstart: %v\\n  end: %v\", i, tc.in, pb)\n\t\t}\n\t}\n}\n\n// A limitedWriter accepts some output before it fails.\n// This is a proxy for something like a nearly-full or imminently-failing disk,\n// or a network connection that is about to die.\ntype limitedWriter struct {\n\tb     bytes.Buffer\n\tlimit int\n}\n\nvar outOfSpace = errors.New(\"proto: insufficient space\")\n\nfunc (w *limitedWriter) Write(p []byte) (n int, err error) {\n\tvar avail = w.limit - w.b.Len()\n\tif avail <= 0 {\n\t\treturn 0, outOfSpace\n\t}\n\tif len(p) <= avail {\n\t\treturn w.b.Write(p)\n\t}\n\tn, _ = w.b.Write(p[:avail])\n\treturn n, outOfSpace\n}\n\nfunc TestMarshalTextFailing(t *testing.T) {\n\t// Try lots of different sizes to exercise more error code-paths.\n\tfor lim := 0; lim < len(text); lim++ {\n\t\tbuf := new(limitedWriter)\n\t\tbuf.limit = lim\n\t\terr := proto.MarshalText(buf, newTestMessage())\n\t\t// We expect a certain error, but also some partial results in the buffer.\n\t\tif err != outOfSpace {\n\t\t\tt.Errorf(\"Got:\\n===\\n%v===\\nExpected:\\n===\\n%v===\\n\", err, outOfSpace)\n\t\t}\n\t\ts := buf.b.String()\n\t\tx := text[:buf.limit]\n\t\tif s != x {\n\t\t\tt.Errorf(\"Got:\\n===\\n%v===\\nExpected:\\n===\\n%v===\\n\", s, x)\n\t\t}\n\t}\n}\n\nfunc TestFloats(t *testing.T) {\n\ttests := []struct {\n\t\tf    float64\n\t\twant string\n\t}{\n\t\t{0, \"0\"},\n\t\t{4.7, \"4.7\"},\n\t\t{math.Inf(1), \"inf\"},\n\t\t{math.Inf(-1), \"-inf\"},\n\t\t{math.NaN(), \"nan\"},\n\t}\n\tfor _, test := range tests {\n\t\tmsg := &pb.FloatingPoint{F: &test.f}\n\t\tgot := strings.TrimSpace(msg.String())\n\t\twant := `f:` + test.want\n\t\tif got != want {\n\t\t\tt.Errorf(\"f=%f: got %q, want %q\", test.f, got, want)\n\t\t}\n\t}\n}\n\nfunc TestRepeatedNilText(t *testing.T) {\n\tm := 
&pb.MessageList{\n\t\tMessage: []*pb.MessageList_Message{\n\t\t\tnil,\n\t\t\t&pb.MessageList_Message{\n\t\t\t\tName: proto.String(\"Horse\"),\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t}\n\twant := `Message <nil>\nMessage {\n  name: \"Horse\"\n}\nMessage <nil>\n`\n\tif s := proto.MarshalTextString(m); s != want {\n\t\tt.Errorf(\" got: %s\\nwant: %s\", s, want)\n\t}\n}\n\nfunc TestProto3Text(t *testing.T) {\n\ttests := []struct {\n\t\tm    proto.Message\n\t\twant string\n\t}{\n\t\t// zero message\n\t\t{&proto3pb.Message{}, ``},\n\t\t// zero message except for an empty byte slice\n\t\t{&proto3pb.Message{Data: []byte{}}, ``},\n\t\t// trivial case\n\t\t{&proto3pb.Message{Name: \"Rob\", HeightInCm: 175}, `name:\"Rob\" height_in_cm:175`},\n\t\t// empty map\n\t\t{&pb.MessageWithMap{}, ``},\n\t\t// non-empty map; current map format is the same as a repeated struct\n\t\t{\n\t\t\t&pb.MessageWithMap{NameMapping: map[int32]string{1234: \"Feist\"}},\n\t\t\t`name_mapping:<key:1234 value:\"Feist\" >`,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tgot := strings.TrimSpace(test.m.String())\n\t\tif got != test.want {\n\t\t\tt.Errorf(\"\\n got %s\\nwant %s\", got, test.want)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/mreiferson/go-options/.travis.yml",
    "content": "language: go\ngo:\n   - 1.3\n   - tip\nnotifications:\n  email: false\n\n"
  },
  {
    "path": "vendor/github.com/mreiferson/go-options/LICENSE",
    "content": "Permission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n"
  },
  {
    "path": "vendor/github.com/mreiferson/go-options/README.md",
    "content": "# go-options\n\nResolve configuration values set via command line flags, config files, and default struct values.\n\n[![Build Status](https://travis-ci.org/mreiferson/go-options.svg?branch=master)](https://travis-ci.org/mreiferson/go-options) [![GoDoc](https://godoc.org/github.com/mreiferson/go-options?status.svg)](https://godoc.org/github.com/mreiferson/go-options)\n"
  },
  {
    "path": "vendor/github.com/mreiferson/go-options/example_test.go",
    "content": "package options_test\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/mreiferson/go-options\"\n)\n\ntype Options struct {\n\tMaxSize     int64         `flag:\"max-size\" cfg:\"max_size\"`\n\tTimeout     time.Duration `flag:\"timeout\" cfg:\"timeout\"`\n\tDescription string        `flag:\"description\" cfg:\"description\"`\n}\n\nfunc ExampleResolve() {\n\tflagSet := flag.NewFlagSet(\"example\", flag.ExitOnError)\n\tflagSet.Int64(\"max-size\", 1024768, \"maximum size\")\n\tflagSet.Duration(\"timeout\", 1*time.Hour, \"timeout setting\")\n\t// parse command line arguments here\n\t// flagSet.Parse(os.Args[1:])\n\tflagSet.Parse([]string{\"-timeout=5s\"})\n\n\topts := &Options{\n\t\tMaxSize: 1,\n\t\tTimeout: time.Second,\n\t}\n\tcfg := map[string]interface{}{\n\t\t\"timeout\": \"1h\",\n\t}\n\n\tfmt.Printf(\"%#v\", opts)\n\toptions.Resolve(opts, flagSet, cfg)\n\tfmt.Printf(\"%#v\", opts)\n}\n"
  },
  {
    "path": "vendor/github.com/mreiferson/go-options/options.go",
    "content": "// options resolves configuration values set via command line flags, config files, and default\n// struct values\npackage options\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n// Resolve combines configuration values set via command line flags (FlagSet) or an externally\n// parsed config file (map) onto an options struct.\n//\n// The options struct supports struct tags \"flag\", \"cfg\", and \"deprecated\", ex:\n//\n// \ttype Options struct {\n// \t\tMaxSize     int64         `flag:\"max-size\" cfg:\"max_size\"`\n// \t\tTimeout     time.Duration `flag:\"timeout\" cfg:\"timeout\"`\n// \t\tDescription string        `flag:\"description\" cfg:\"description\"`\n// \t}\n//\n// Values are resolved with the following priorities (highest to lowest):\n//\n//   1. Command line flag\n//   2. Deprecated command line flag\n//   3. Config file value\n//   4. Options struct default value\n//\nfunc Resolve(options interface{}, flagSet *flag.FlagSet, cfg map[string]interface{}) {\n\tval := reflect.ValueOf(options).Elem()\n\ttyp := val.Type()\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\t// pull out the struct tags:\n\t\t//    flag - the name of the command line flag\n\t\t//    deprecated - (optional) the name of the deprecated command line flag\n\t\t//    cfg - (optional, defaults to underscored flag) the name of the config file option\n\t\tfield := typ.Field(i)\n\t\tflagName := field.Tag.Get(\"flag\")\n\t\tdeprecatedFlagName := field.Tag.Get(\"deprecated\")\n\t\tcfgName := field.Tag.Get(\"cfg\")\n\t\tif flagName == \"\" {\n\t\t\t// resolvable fields must have at least the `flag` struct tag\n\t\t\tcontinue\n\t\t}\n\t\tif cfgName == \"\" {\n\t\t\tcfgName = strings.Replace(flagName, \"-\", \"_\", -1)\n\t\t}\n\n\t\t// lookup the flags upfront because it's a programming error\n\t\t// if they aren't found (hence the panic)\n\t\tflagInst := flagSet.Lookup(flagName)\n\t\tif 
flagInst == nil {\n\t\t\tlog.Panicf(\"ERROR: flag %s does not exist\", flagName)\n\t\t}\n\t\tvar deprecatedFlag *flag.Flag\n\t\tif deprecatedFlagName != \"\" {\n\t\t\tdeprecatedFlag = flagSet.Lookup(deprecatedFlagName)\n\t\t\tif deprecatedFlag == nil {\n\t\t\t\tlog.Panicf(\"ERROR: deprecated flag %s does not exist\", deprecatedFlagName)\n\t\t\t}\n\t\t}\n\n\t\t// resolve the flags with the following priority (highest to lowest):\n\t\t//\n\t\t// 1. command line flag\n\t\t// 2. deprecated command line flag\n\t\t// 3. config file option\n\t\tvar v interface{}\n\t\tif hasArg(flagName) {\n\t\t\tv = flagInst.Value.String()\n\t\t} else if deprecatedFlagName != \"\" && hasArg(deprecatedFlagName) {\n\t\t\tv = deprecatedFlag.Value.String()\n\t\t\tlog.Printf(\"WARNING: use of the --%s command line flag is deprecated (use --%s)\",\n\t\t\t\tdeprecatedFlagName, flagName)\n\t\t} else {\n\t\t\tcfgVal, ok := cfg[cfgName]\n\t\t\tif !ok {\n\t\t\t\t// if the config file option wasn't specified just use the default\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tv = cfgVal\n\t\t}\n\t\tfieldVal := val.FieldByName(field.Name)\n\t\tcoerced, err := coerce(v, fieldVal.Interface(), field.Tag.Get(\"arg\"))\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"ERROR: option resolution failed to coerce %v for %s (%+v) - %s\",\n\t\t\t\tv, field.Name, fieldVal, err)\n\t\t}\n\t\tfieldVal.Set(reflect.ValueOf(coerced))\n\t}\n}\n\nfunc coerceBool(v interface{}) (bool, error) {\n\tswitch v.(type) {\n\tcase bool:\n\t\treturn v.(bool), nil\n\tcase string:\n\t\treturn strconv.ParseBool(v.(string))\n\tcase int, int16, uint16, int32, uint32, int64, uint64:\n\t\treturn reflect.ValueOf(v).Int() == 0, nil\n\t}\n\treturn false, errors.New(\"invalid value type\")\n}\n\nfunc coerceInt64(v interface{}) (int64, error) {\n\tswitch v.(type) {\n\tcase string:\n\t\treturn strconv.ParseInt(v.(string), 10, 64)\n\tcase int, int16, uint16, int32, uint32, int64, uint64:\n\t\treturn reflect.ValueOf(v).Int(), nil\n\t}\n\treturn 0, errors.New(\"invalid 
value type\")\n}\n\nfunc coerceDuration(v interface{}, arg string) (time.Duration, error) {\n\tswitch v.(type) {\n\tcase string:\n\t\t// this is a helper to maintain backwards compatibility for flags which\n\t\t// were originally Int before we realized there was a Duration flag :)\n\t\tif regexp.MustCompile(`^[0-9]+$`).MatchString(v.(string)) {\n\t\t\tintVal, err := strconv.Atoi(v.(string))\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tmult, err := time.ParseDuration(arg)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\treturn time.Duration(intVal) * mult, nil\n\t\t}\n\t\treturn time.ParseDuration(v.(string))\n\tcase int, int16, uint16, int32, uint32, int64, uint64:\n\t\t// treat like ms\n\t\treturn time.Duration(reflect.ValueOf(v).Int()) * time.Millisecond, nil\n\tcase time.Duration:\n\t\treturn v.(time.Duration), nil\n\t}\n\treturn 0, errors.New(\"invalid value type\")\n}\n\nfunc coerceStringSlice(v interface{}) ([]string, error) {\n\tvar tmp []string\n\tswitch v.(type) {\n\tcase string:\n\t\tfor _, s := range strings.Split(v.(string), \",\") {\n\t\t\ttmp = append(tmp, s)\n\t\t}\n\tcase []interface{}:\n\t\tfor _, si := range v.([]interface{}) {\n\t\t\ttmp = append(tmp, si.(string))\n\t\t}\n\tcase []string:\n\t\ttmp = v.([]string)\n\t}\n\treturn tmp, nil\n}\n\nfunc coerceFloat64Slice(v interface{}) ([]float64, error) {\n\tvar tmp []float64\n\tswitch v.(type) {\n\tcase string:\n\t\tfor _, s := range strings.Split(v.(string), \",\") {\n\t\t\tf, err := strconv.ParseFloat(strings.TrimSpace(s), 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttmp = append(tmp, f)\n\t\t}\n\tcase []interface{}:\n\t\tfor _, fi := range v.([]interface{}) {\n\t\t\ttmp = append(tmp, fi.(float64))\n\t\t}\n\tcase []string:\n\t\tfor _, s := range v.([]string) {\n\t\t\tf, err := strconv.ParseFloat(strings.TrimSpace(s), 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttmp = append(tmp, f)\n\t\t}\n\tcase 
[]float64:\n\t\tlog.Printf(\"%+v\", v)\n\t\ttmp = v.([]float64)\n\t}\n\treturn tmp, nil\n}\n\nfunc coerceString(v interface{}) (string, error) {\n\tswitch v.(type) {\n\tcase string:\n\t\treturn v.(string), nil\n\t}\n\treturn fmt.Sprintf(\"%s\", v), nil\n}\n\nfunc coerce(v interface{}, opt interface{}, arg string) (interface{}, error) {\n\tswitch opt.(type) {\n\tcase bool:\n\t\treturn coerceBool(v)\n\tcase int:\n\t\ti, err := coerceInt64(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn int(i), nil\n\tcase int16:\n\t\ti, err := coerceInt64(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn int16(i), nil\n\tcase uint16:\n\t\ti, err := coerceInt64(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn uint16(i), nil\n\tcase int32:\n\t\ti, err := coerceInt64(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn int32(i), nil\n\tcase uint32:\n\t\ti, err := coerceInt64(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn uint32(i), nil\n\tcase int64:\n\t\treturn coerceInt64(v)\n\tcase uint64:\n\t\ti, err := coerceInt64(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn uint64(i), nil\n\tcase string:\n\t\treturn coerceString(v)\n\tcase time.Duration:\n\t\treturn coerceDuration(v, arg)\n\tcase []string:\n\t\treturn coerceStringSlice(v)\n\tcase []float64:\n\t\treturn coerceFloat64Slice(v)\n\t}\n\treturn nil, errors.New(\"invalid type\")\n}\n\nfunc hasArg(s string) bool {\n\tfor _, arg := range os.Args {\n\t\tif strings.Contains(arg, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/github.com/mreiferson/go-snappystream/.travis.yml",
    "content": "language: go\ngo:\n  - 1.2.2\n  - 1.3.1\nenv:\n  - GOARCH=amd64\n  - GOARCH=386\ninstall:\n  - go get code.google.com/p/snappy-go/snappy\nscript:\n  - go test -v\nnotifications:\n  email: false\n"
  },
  {
    "path": "vendor/github.com/mreiferson/go-snappystream/LICENSE",
    "content": "Permission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n"
  },
  {
    "path": "vendor/github.com/mreiferson/go-snappystream/README.md",
    "content": "## go-snappystream\n\na Go package for framed snappy streams.\n\n[![Build Status](https://secure.travis-ci.org/mreiferson/go-snappystream.png?branch=master)](http://travis-ci.org/mreiferson/go-snappystream) [![GoDoc](https://godoc.org/github.com/mreiferson/go-snappystream?status.svg)](https://godoc.org/github.com/mreiferson/go-snappystream)\n\nThis package wraps [snappy-go][1] and supplies a `Reader` and `Writer` \nfor the snappy [framed stream format][2].\n\n[1]: https://code.google.com/p/snappy-go/\n[2]: https://snappy.googlecode.com/svn/trunk/framing_format.txt\n"
  },
  {
    "path": "vendor/github.com/mreiferson/go-snappystream/fixturedata_test.go",
    "content": "package snappystream\n\nvar testDataMan = []byte(`\n.TH XARGS 1L \\\" -*- nroff -*-\n.SH NAME\nxargs \\- build and execute command lines from standard input\n.SH SYNOPSIS\n.B xargs\n[\\-0prtx] [\\-e[eof-str]] [\\-i[replace-str]] [\\-l[max-lines]]\n[\\-n max-args] [\\-s max-chars] [\\-P max-procs] [\\-\\-null] [\\-\\-eof[=eof-str]]\n[\\-\\-replace[=replace-str]] [\\-\\-max-lines[=max-lines]] [\\-\\-interactive]\n[\\-\\-max-chars=max-chars] [\\-\\-verbose] [\\-\\-exit] [\\-\\-max-procs=max-procs]\n[\\-\\-max-args=max-args] [\\-\\-no-run-if-empty] [\\-\\-version] [\\-\\-help]\n[command [initial-arguments]]\n.SH DESCRIPTION\nThis manual page\ndocuments the GNU version of\n.BR xargs .\n.B xargs\nreads arguments from the standard input, delimited by blanks (which can be\nprotected with double or single quotes or a backslash) or newlines,\nand executes the\n.I command\n(default is /bin/echo) one or more times with any\n.I initial-arguments\nfollowed by arguments read from standard input.  Blank lines on the\nstandard input are ignored.\n.P\n.B xargs\nexits with the following status:\n.nf\n0 if it succeeds\n123 if any invocation of the command exited with status 1-125\n124 if the command exited with status 255\n125 if the command is killed by a signal\n126 if the command cannot be run\n127 if the command is not found\n1 if some other error occurred.\n.fi\n.SS OPTIONS\n.TP\n.I \"\\-\\-null, \\-0\"\nInput filenames are terminated by a null character instead of by\nwhitespace, and the quotes and backslash are not special (every\ncharacter is taken literally).  Disables the end of file string, which\nis treated like any other argument.  Useful when arguments might\ncontain white space, quote marks, or backslashes.  The GNU find\n\\-print0 option produces input suitable for this mode.\n.TP\n.I \"\\-\\-eof[=eof-str], \\-e[eof-str]\"\nSet the end of file string to \\fIeof-str\\fR.  
If the end of file\nstring occurs as a line of input, the rest of the input is ignored.\nIf \\fIeof-str\\fR is omitted, there is no end of file string.  If this\noption is not given, the end of file string defaults to \"_\".\n.TP\n.I \"\\-\\-help\"\nPrint a summary of the options to\n.B xargs\nand exit.\n.TP\n.I \"\\-\\-replace[=replace-str], \\-i[replace-str]\"\nReplace occurences of \\fIreplace-str\\fR in the initial arguments with\nnames read from standard input.\nAlso, unquoted blanks do not terminate arguments.\nIf \\fIreplace-str\\fR is omitted, it\ndefaults to \"{}\" (like for 'find \\-exec').  Implies \\fI\\-x\\fP and\n\\fI\\-l 1\\fP.\n.TP\n.I \"\\-\\-max-lines[=max-lines], -l[max-lines]\"\nUse at most \\fImax-lines\\fR nonblank input lines per command line;\n\\fImax-lines\\fR defaults to 1 if omitted.  Trailing blanks cause an\ninput line to be logically continued on the next input line.  Implies\n\\fI\\-x\\fR.\n.TP\n.I \"\\-\\-max-args=max-args, \\-n max-args\"\nUse at most \\fImax-args\\fR arguments per command line.  Fewer than\n\\fImax-args\\fR arguments will be used if the size (see the \\-s option)\nis exceeded, unless the \\-x option is given, in which case \\fBxargs\\fR\nwill exit.\n.TP\n.I \"\\-\\-interactive, \\-p\"\nPrompt the user about whether to run each command line and read a line\nfrom the terminal.  Only run the command line if the response starts\nwith 'y' or 'Y'.  Implies \\fI\\-t\\fR.\n.TP\n.I \"\\-\\-no-run-if-empty, \\-r\"\nIf the standard input does not contain any nonblanks, do not run the\ncommand.  Normally, the command is run once even if there is no input.\n.TP\n.I \"\\-\\-max-chars=max-chars, \\-s max-chars\"\nUse at most \\fImax-chars\\fR characters per command line, including the\ncommand and initial arguments and the terminating nulls at the ends of\nthe argument strings.  
The default is as large as possible, up to 20k\ncharacters.\n.TP\n.I \"\\-\\-verbose, \\-t\"\nPrint the command line on the standard error output before executing\nit.\n.TP\n.I \"\\-\\-version\"\nPrint the version number of\n.B xargs\nand exit.\n.TP\n.I \"\\-\\-exit, \\-x\"\nExit if the size (see the \\fI\\-s\\fR option) is exceeded.\n.TP\n.I \"\\-\\-max-procs=max-procs, \\-P max-procs\"\nRun up to \\fImax-procs\\fR processes at a time; the default is 1.  If\n\\fImax-procs\\fR is 0, \\fBxargs\\fR will run as many processes as\npossible at a time.  Use the \\fI\\-n\\fR option with \\fI\\-P\\fR;\notherwise chances are that only one exec will be done.\n.SH \"SEE ALSO\"\n\\fBfind\\fP(1L), \\fBlocate\\fP(1L), \\fBlocatedb\\fP(5L), \\fBupdatedb\\fP(1)\n\\fBFinding Files\\fP (on-line in Info, or printed)`)\n\n// curl -s https://api.github.com/users/mreiferson/repos\nvar testDataJSON = []byte(`\n[\n  {\n    \"id\": 19041094,\n    \"name\": \"2014-talks\",\n    \"full_name\": \"mreiferson/2014-talks\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": 
\"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/2014-talks\",\n    \"description\": \"This is the official repository for slides and talks from GopherCon 2014\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/2014-talks\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/2014-talks/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/2014-talks/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/2014-talks/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/2014-talks/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/2014-talks/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/2014-talks/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/2014-talks/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/2014-talks/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/2014-talks/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/2014-talks/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/2014-talks/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/2014-talks/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/2014-talks/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/2014-talks/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/2014-talks/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/2014-talks/languages\",\n    \"stargazers_url\": 
\"https://api.github.com/repos/mreiferson/2014-talks/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/2014-talks/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/2014-talks/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/2014-talks/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/2014-talks/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/2014-talks/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/2014-talks/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/2014-talks/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/2014-talks/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/2014-talks/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/2014-talks/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/2014-talks/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/2014-talks/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/2014-talks/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/2014-talks/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/2014-talks/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/2014-talks/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/2014-talks/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/2014-talks/releases{/id}\",\n    \"created_at\": \"2014-04-22T18:28:59Z\",\n    \"updated_at\": \"2014-04-26T03:10:39Z\",\n    \"pushed_at\": \"2014-04-25T14:46:35Z\",\n    \"git_url\": 
\"git://github.com/mreiferson/2014-talks.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/2014-talks.git\",\n    \"clone_url\": \"https://github.com/mreiferson/2014-talks.git\",\n    \"svn_url\": \"https://github.com/mreiferson/2014-talks\",\n    \"homepage\": null,\n    \"size\": 3596,\n    \"stargazers_count\": 0,\n    \"watchers_count\": 0,\n    \"language\": null,\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 0,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 3329246,\n    \"name\": \"asyncdynamo\",\n    \"full_name\": \"mreiferson/asyncdynamo\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": 
\"https://github.com/mreiferson/asyncdynamo\",\n    \"description\": \"async Amazon DynamoDB library for Tornado\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/asyncdynamo\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/subscribers\",\n    
\"subscription_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/asyncdynamo/releases{/id}\",\n    \"created_at\": \"2012-02-01T21:32:54Z\",\n    \"updated_at\": \"2014-04-03T21:58:44Z\",\n    \"pushed_at\": \"2012-02-01T21:06:23Z\",\n    \"git_url\": \"git://github.com/mreiferson/asyncdynamo.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/asyncdynamo.git\",\n    \"clone_url\": \"https://github.com/mreiferson/asyncdynamo.git\",\n    \"svn_url\": \"https://github.com/mreiferson/asyncdynamo\",\n    \"homepage\": 
\"\",\n    \"size\": 73,\n    \"stargazers_count\": 1,\n    \"watchers_count\": 1,\n    \"language\": \"Python\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 1,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 2622445,\n    \"name\": \"asyncmongo\",\n    \"full_name\": \"mreiferson/asyncmongo\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/asyncmongo\",\n    \"description\": \"An asynchronous library for accessing mongo with tornado.ioloop\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/asyncmongo\",\n    \"forks_url\": 
\"https://api.github.com/repos/mreiferson/asyncmongo/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/commits{/sha}\",\n    \"git_commits_url\": 
\"https://api.github.com/repos/mreiferson/asyncmongo/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/asyncmongo/releases{/id}\",\n    \"created_at\": \"2011-10-21T19:01:05Z\",\n    \"updated_at\": \"2013-01-04T11:58:26Z\",\n    \"pushed_at\": \"2011-10-21T19:02:46Z\",\n    \"git_url\": \"git://github.com/mreiferson/asyncmongo.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/asyncmongo.git\",\n    \"clone_url\": \"https://github.com/mreiferson/asyncmongo.git\",\n    \"svn_url\": \"https://github.com/mreiferson/asyncmongo\",\n    \"homepage\": \"http://github.com/bitly/asyncmongo\",\n    \"size\": 563,\n    \"stargazers_count\": 1,\n    \"watchers_count\": 1,\n    \"language\": \"Python\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": 
true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 1,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 4554560,\n    \"name\": \"blog.perplexedlabs.com\",\n    \"full_name\": \"mreiferson/blog.perplexedlabs.com\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/blog.perplexedlabs.com\",\n    \"description\": \"archive of posts\",\n    \"fork\": false,\n    \"url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/keys{/key_id}\",\n    \"collaborators_url\": 
\"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/commits{/sha}\",\n    \"git_commits_url\": 
\"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/releases{/id}\",\n    \"created_at\": \"2012-06-05T01:38:40Z\",\n    \"updated_at\": \"2014-04-27T23:44:56Z\",\n    \"pushed_at\": \"2014-04-27T23:44:56Z\",\n    \"git_url\": \"git://github.com/mreiferson/blog.perplexedlabs.com.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/blog.perplexedlabs.com.git\",\n    \"clone_url\": \"https://github.com/mreiferson/blog.perplexedlabs.com.git\",\n    \"svn_url\": \"https://github.com/mreiferson/blog.perplexedlabs.com\",\n    \"homepage\": 
\"http://blog.perplexedlabs.com/\",\n    \"size\": 668,\n    \"stargazers_count\": 1,\n    \"watchers_count\": 1,\n    \"language\": null,\n    \"has_issues\": true,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 1,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 2861903,\n    \"name\": \"btpath\",\n    \"full_name\": \"mreiferson/btpath\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/btpath\",\n    \"description\": \"A* implementation/test app (1997)\",\n    \"fork\": false,\n    \"url\": \"https://api.github.com/repos/mreiferson/btpath\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/btpath/forks\",\n    
\"keys_url\": \"https://api.github.com/repos/mreiferson/btpath/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/btpath/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/btpath/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/btpath/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/btpath/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/btpath/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/btpath/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/btpath/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/btpath/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/btpath/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/btpath/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/btpath/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/btpath/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/btpath/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/btpath/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/btpath/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/btpath/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/btpath/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/btpath/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/btpath/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/btpath/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/btpath/comments{/number}\",\n    \"issue_comment_url\": 
\"https://api.github.com/repos/mreiferson/btpath/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/btpath/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/btpath/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/btpath/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/btpath/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/btpath/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/btpath/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/btpath/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/btpath/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/btpath/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/btpath/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/btpath/releases{/id}\",\n    \"created_at\": \"2011-11-27T17:23:02Z\",\n    \"updated_at\": \"2013-01-04T17:58:42Z\",\n    \"pushed_at\": \"2011-11-29T01:36:49Z\",\n    \"git_url\": \"git://github.com/mreiferson/btpath.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/btpath.git\",\n    \"clone_url\": \"https://github.com/mreiferson/btpath.git\",\n    \"svn_url\": \"https://github.com/mreiferson/btpath\",\n    \"homepage\": \"\",\n    \"size\": 88,\n    \"stargazers_count\": 1,\n    \"watchers_count\": 1,\n    \"language\": \"C++\",\n    \"has_issues\": true,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 1,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 15747148,\n    \"name\": \"chef-nsq\",\n    \"full_name\": \"mreiferson/chef-nsq\",\n    
\"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/chef-nsq\",\n    \"description\": \"Chef Cookbook for NSQ\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/chef-nsq\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/issues/events{/number}\",\n    \"events_url\": 
\"https://api.github.com/repos/mreiferson/chef-nsq/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/merges\",\n    \"archive_url\": 
\"https://api.github.com/repos/mreiferson/chef-nsq/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/chef-nsq/releases{/id}\",\n    \"created_at\": \"2014-01-08T20:27:41Z\",\n    \"updated_at\": \"2014-04-28T14:15:50Z\",\n    \"pushed_at\": \"2014-04-28T04:31:58Z\",\n    \"git_url\": \"git://github.com/mreiferson/chef-nsq.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/chef-nsq.git\",\n    \"clone_url\": \"https://github.com/mreiferson/chef-nsq.git\",\n    \"svn_url\": \"https://github.com/mreiferson/chef-nsq\",\n    \"homepage\": null,\n    \"size\": 132,\n    \"stargazers_count\": 0,\n    \"watchers_count\": 0,\n    \"language\": \"Ruby\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": false,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 0,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 5287337,\n    \"name\": \"dablooms\",\n    \"full_name\": \"mreiferson/dablooms\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      
\"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/dablooms\",\n    \"description\": \"scaling, counting, bloom filter library\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/dablooms\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/dablooms/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/dablooms/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/dablooms/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/dablooms/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/dablooms/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/dablooms/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/dablooms/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/dablooms/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/dablooms/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/dablooms/tags\",\n    \"blobs_url\": 
\"https://api.github.com/repos/mreiferson/dablooms/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/dablooms/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/dablooms/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/dablooms/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/dablooms/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/dablooms/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/dablooms/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/dablooms/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/dablooms/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/dablooms/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/dablooms/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/dablooms/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/dablooms/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/dablooms/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/dablooms/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/dablooms/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/dablooms/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/dablooms/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/dablooms/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/dablooms/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/dablooms/pulls{/number}\",\n    \"milestones_url\": 
\"https://api.github.com/repos/mreiferson/dablooms/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/dablooms/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/dablooms/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/dablooms/releases{/id}\",\n    \"created_at\": \"2012-08-03T16:03:50Z\",\n    \"updated_at\": \"2013-03-08T15:37:44Z\",\n    \"pushed_at\": \"2013-03-08T15:37:44Z\",\n    \"git_url\": \"git://github.com/mreiferson/dablooms.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/dablooms.git\",\n    \"clone_url\": \"https://github.com/mreiferson/dablooms.git\",\n    \"svn_url\": \"https://github.com/mreiferson/dablooms\",\n    \"homepage\": \"\",\n    \"size\": 186,\n    \"stargazers_count\": 1,\n    \"watchers_count\": 1,\n    \"language\": \"C\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 1,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 2861959,\n    \"name\": \"dod\",\n    \"full_name\": \"mreiferson/dod\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": 
\"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/dod\",\n    \"description\": \"Do or Die - an incomplete real-time strategy game inspired by Warcraft (1997)\",\n    \"fork\": false,\n    \"url\": \"https://api.github.com/repos/mreiferson/dod\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/dod/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/dod/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/dod/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/dod/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/dod/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/dod/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/dod/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/dod/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/dod/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/dod/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/dod/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/dod/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/dod/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/dod/git/trees{/sha}\",\n    \"statuses_url\": 
\"https://api.github.com/repos/mreiferson/dod/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/dod/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/dod/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/dod/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/dod/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/dod/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/dod/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/dod/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/dod/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/dod/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/dod/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/dod/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/dod/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/dod/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/dod/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/dod/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/dod/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/dod/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/dod/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/dod/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/dod/releases{/id}\",\n    \"created_at\": \"2011-11-27T17:33:19Z\",\n    \"updated_at\": \"2014-05-13T00:56:53Z\",\n    \"pushed_at\": \"2011-11-29T02:08:57Z\",\n   
 \"git_url\": \"git://github.com/mreiferson/dod.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/dod.git\",\n    \"clone_url\": \"https://github.com/mreiferson/dod.git\",\n    \"svn_url\": \"https://github.com/mreiferson/dod\",\n    \"homepage\": \"\",\n    \"size\": 2044,\n    \"stargazers_count\": 1,\n    \"watchers_count\": 1,\n    \"language\": \"C++\",\n    \"has_issues\": true,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 1,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 4515792,\n    \"name\": \"doozer\",\n    \"full_name\": \"mreiferson/doozer\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/doozer\",\n    
\"description\": \"Go client driver for doozerd, a consistent, distributed data store\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/doozer\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/doozer/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/doozer/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/doozer/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/doozer/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/doozer/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/doozer/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/doozer/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/doozer/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/doozer/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/doozer/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/doozer/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/doozer/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/doozer/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/doozer/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/doozer/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/doozer/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/doozer/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/doozer/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/doozer/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/doozer/subscription\",\n    \"commits_url\": 
\"https://api.github.com/repos/mreiferson/doozer/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/doozer/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/doozer/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/doozer/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/doozer/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/doozer/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/doozer/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/doozer/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/doozer/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/doozer/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/doozer/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/doozer/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/doozer/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/doozer/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/doozer/releases{/id}\",\n    \"created_at\": \"2012-06-01T03:41:14Z\",\n    \"updated_at\": \"2013-03-16T15:23:56Z\",\n    \"pushed_at\": \"2013-03-16T15:23:55Z\",\n    \"git_url\": \"git://github.com/mreiferson/doozer.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/doozer.git\",\n    \"clone_url\": \"https://github.com/mreiferson/doozer.git\",\n    \"svn_url\": \"https://github.com/mreiferson/doozer\",\n    \"homepage\": \"https://github.com/ha/doozerd\",\n    \"size\": 2584,\n    \"stargazers_count\": 0,\n    \"watchers_count\": 0,\n    \"language\": \"Go\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    
\"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 0,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 3391437,\n    \"name\": \"doozer-c\",\n    \"full_name\": \"mreiferson/doozer-c\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/doozer-c\",\n    \"description\": \"async C client library for doozerd\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/doozer-c\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/doozer-c/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/doozer-c/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/doozer-c/collaborators{/collaborator}\",\n    
\"teams_url\": \"https://api.github.com/repos/mreiferson/doozer-c/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/doozer-c/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/doozer-c/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/doozer-c/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/doozer-c/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/doozer-c/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/doozer-c/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/doozer-c/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/doozer-c/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/doozer-c/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/doozer-c/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/doozer-c/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/doozer-c/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/doozer-c/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/doozer-c/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/doozer-c/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/doozer-c/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/doozer-c/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/doozer-c/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/doozer-c/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/doozer-c/issues/comments/{number}\",\n    \"contents_url\": 
\"https://api.github.com/repos/mreiferson/doozer-c/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/doozer-c/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/doozer-c/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/doozer-c/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/doozer-c/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/doozer-c/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/doozer-c/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/doozer-c/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/doozer-c/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/doozer-c/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/doozer-c/releases{/id}\",\n    \"created_at\": \"2012-02-08T21:15:33Z\",\n    \"updated_at\": \"2014-04-03T21:58:49Z\",\n    \"pushed_at\": \"2012-11-21T16:46:25Z\",\n    \"git_url\": \"git://github.com/mreiferson/doozer-c.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/doozer-c.git\",\n    \"clone_url\": \"https://github.com/mreiferson/doozer-c.git\",\n    \"svn_url\": \"https://github.com/mreiferson/doozer-c\",\n    \"homepage\": \"\",\n    \"size\": 158,\n    \"stargazers_count\": 0,\n    \"watchers_count\": 0,\n    \"language\": \"C\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 0,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 4515795,\n    \"name\": \"doozerd\",\n    \"full_name\": \"mreiferson/doozerd\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n     
 \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/doozerd\",\n    \"description\": \"A consistent distributed data store.\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/doozerd\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/doozerd/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/doozerd/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/doozerd/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/doozerd/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/doozerd/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/doozerd/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/doozerd/events\",\n    \"assignees_url\": 
\"https://api.github.com/repos/mreiferson/doozerd/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/doozerd/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/doozerd/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/doozerd/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/doozerd/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/doozerd/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/doozerd/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/doozerd/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/doozerd/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/doozerd/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/doozerd/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/doozerd/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/doozerd/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/doozerd/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/doozerd/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/doozerd/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/doozerd/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/doozerd/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/doozerd/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/doozerd/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/doozerd/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/doozerd/downloads\",\n    
\"issues_url\": \"https://api.github.com/repos/mreiferson/doozerd/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/doozerd/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/doozerd/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/doozerd/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/doozerd/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/doozerd/releases{/id}\",\n    \"created_at\": \"2012-06-01T03:41:32Z\",\n    \"updated_at\": \"2013-12-28T19:22:30Z\",\n    \"pushed_at\": \"2013-12-28T19:22:30Z\",\n    \"git_url\": \"git://github.com/mreiferson/doozerd.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/doozerd.git\",\n    \"clone_url\": \"https://github.com/mreiferson/doozerd.git\",\n    \"svn_url\": \"https://github.com/mreiferson/doozerd\",\n    \"homepage\": \"\",\n    \"size\": 3135,\n    \"stargazers_count\": 0,\n    \"watchers_count\": 0,\n    \"language\": \"Go\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 0,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 8172002,\n    \"name\": \"e\",\n    \"full_name\": \"mreiferson/e\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": 
\"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/e\",\n    \"description\": \"Library containing high-performance datastructures and utilities for C++\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/e\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/e/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/e/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/e/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/e/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/e/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/e/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/e/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/e/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/e/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/e/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/e/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/e/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/e/git/refs{/sha}\",\n    \"trees_url\": 
\"https://api.github.com/repos/mreiferson/e/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/e/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/e/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/e/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/e/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/e/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/e/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/e/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/e/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/e/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/e/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/e/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/e/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/e/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/e/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/e/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/e/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/e/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/e/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/e/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/e/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/e/releases{/id}\",\n    \"created_at\": \"2013-02-13T02:42:55Z\",\n    \"updated_at\": \"2013-02-18T21:10:07Z\",\n    
\"pushed_at\": \"2013-02-13T02:45:16Z\",\n    \"git_url\": \"git://github.com/mreiferson/e.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/e.git\",\n    \"clone_url\": \"https://github.com/mreiferson/e.git\",\n    \"svn_url\": \"https://github.com/mreiferson/e\",\n    \"homepage\": \"\",\n    \"size\": 437,\n    \"stargazers_count\": 0,\n    \"watchers_count\": 0,\n    \"language\": \"C++\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 0,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 2792604,\n    \"name\": \"encfs-macfusion2\",\n    \"full_name\": \"mreiferson/encfs-macfusion2\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": 
\"https://github.com/mreiferson/encfs-macfusion2\",\n    \"description\": \"enhanced version of encfs-macfusion2 plugin http://code.google.com/p/encfs-macfusion2/\",\n    \"fork\": false,\n    \"url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/stargazers\",\n    \"contributors_url\": 
\"https://api.github.com/repos/mreiferson/encfs-macfusion2/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/encfs-macfusion2/releases{/id}\",\n    \"created_at\": \"2011-11-17T01:58:01Z\",\n    \"updated_at\": \"2013-10-22T06:29:03Z\",\n    \"pushed_at\": \"2011-11-17T02:13:15Z\",\n    \"git_url\": 
\"git://github.com/mreiferson/encfs-macfusion2.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/encfs-macfusion2.git\",\n    \"clone_url\": \"https://github.com/mreiferson/encfs-macfusion2.git\",\n    \"svn_url\": \"https://github.com/mreiferson/encfs-macfusion2\",\n    \"homepage\": \"\",\n    \"size\": 195,\n    \"stargazers_count\": 4,\n    \"watchers_count\": 4,\n    \"language\": \"Objective-C\",\n    \"has_issues\": true,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 1,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 1,\n    \"open_issues\": 0,\n    \"watchers\": 4,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 5263991,\n    \"name\": \"file2http\",\n    \"full_name\": \"mreiferson/file2http\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": 
\"https://github.com/mreiferson/file2http\",\n    \"description\": \"spray a line-oriented file at an HTTP endpoint\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/file2http\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/file2http/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/file2http/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/file2http/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/file2http/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/file2http/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/file2http/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/file2http/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/file2http/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/file2http/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/file2http/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/file2http/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/file2http/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/file2http/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/file2http/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/file2http/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/file2http/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/file2http/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/file2http/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/file2http/subscribers\",\n    \"subscription_url\": 
\"https://api.github.com/repos/mreiferson/file2http/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/file2http/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/file2http/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/file2http/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/file2http/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/file2http/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/file2http/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/file2http/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/file2http/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/file2http/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/file2http/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/file2http/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/file2http/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/file2http/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/file2http/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/file2http/releases{/id}\",\n    \"created_at\": \"2012-08-01T19:56:16Z\",\n    \"updated_at\": \"2013-01-11T13:21:40Z\",\n    \"pushed_at\": \"2012-12-21T15:44:32Z\",\n    \"git_url\": \"git://github.com/mreiferson/file2http.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/file2http.git\",\n    \"clone_url\": \"https://github.com/mreiferson/file2http.git\",\n    \"svn_url\": \"https://github.com/mreiferson/file2http\",\n    \"homepage\": \"\",\n    \"size\": 96,\n    \"stargazers_count\": 1,\n    
\"watchers_count\": 1,\n    \"language\": \"Go\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 1,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 15291117,\n    \"name\": \"gablog\",\n    \"full_name\": \"mreiferson/gablog\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/gablog\",\n    \"description\": \"Gopher Academy Blog -- fork of go.blog\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/gablog\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/gablog/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/gablog/keys{/key_id}\",\n    
\"collaborators_url\": \"https://api.github.com/repos/mreiferson/gablog/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/gablog/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/gablog/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/gablog/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/gablog/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/gablog/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/gablog/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/gablog/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/gablog/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/gablog/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/gablog/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/gablog/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/gablog/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/gablog/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/gablog/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/gablog/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/gablog/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/gablog/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/gablog/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/gablog/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/gablog/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/gablog/issues/comments/{number}\",\n    
\"contents_url\": \"https://api.github.com/repos/mreiferson/gablog/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/gablog/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/gablog/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/gablog/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/gablog/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/gablog/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/gablog/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/gablog/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/gablog/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/gablog/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/gablog/releases{/id}\",\n    \"created_at\": \"2013-12-18T18:38:37Z\",\n    \"updated_at\": \"2013-12-20T22:39:33Z\",\n    \"pushed_at\": \"2013-12-20T22:21:52Z\",\n    \"git_url\": \"git://github.com/mreiferson/gablog.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/gablog.git\",\n    \"clone_url\": \"https://github.com/mreiferson/gablog.git\",\n    \"svn_url\": \"https://github.com/mreiferson/gablog\",\n    \"homepage\": \"http://blog.gopheracademy.com\",\n    \"size\": 7911,\n    \"stargazers_count\": 0,\n    \"watchers_count\": 0,\n    \"language\": \"CSS\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 0,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 12223286,\n    \"name\": \"git-open-pull\",\n    \"full_name\": \"mreiferson/git-open-pull\",\n    \"owner\": {\n      \"login\": 
\"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/git-open-pull\",\n    \"description\": \"convert a github issue into a pull request\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/git-open-pull\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/issues/events{/number}\",\n    \"events_url\": 
\"https://api.github.com/repos/mreiferson/git-open-pull/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/compare/{base}...{head}\",\n    \"merges_url\": 
\"https://api.github.com/repos/mreiferson/git-open-pull/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/git-open-pull/releases{/id}\",\n    \"created_at\": \"2013-08-19T17:45:47Z\",\n    \"updated_at\": \"2014-03-03T19:50:11Z\",\n    \"pushed_at\": \"2014-03-03T19:50:09Z\",\n    \"git_url\": \"git://github.com/mreiferson/git-open-pull.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/git-open-pull.git\",\n    \"clone_url\": \"https://github.com/mreiferson/git-open-pull.git\",\n    \"svn_url\": \"https://github.com/mreiferson/git-open-pull\",\n    \"homepage\": \"https://github.com/jehiah/git-open-pull\",\n    \"size\": 155,\n    \"stargazers_count\": 0,\n    \"watchers_count\": 0,\n    \"language\": \"Shell\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 0,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 9547968,\n    \"name\": \"go-hostpool\",\n    \"full_name\": \"mreiferson/go-hostpool\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n     
 \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/go-hostpool\",\n    \"description\": \"Intelligently and flexibly pool among multiple hosts from your Go application\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/go-hostpool\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/events\",\n    \"assignees_url\": 
\"https://api.github.com/repos/mreiferson/go-hostpool/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/{archive_format}{/ref}\",\n    
\"downloads_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/go-hostpool/releases{/id}\",\n    \"created_at\": \"2013-04-19T15:06:04Z\",\n    \"updated_at\": \"2013-04-30T14:17:45Z\",\n    \"pushed_at\": \"2013-04-30T14:17:44Z\",\n    \"git_url\": \"git://github.com/mreiferson/go-hostpool.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/go-hostpool.git\",\n    \"clone_url\": \"https://github.com/mreiferson/go-hostpool.git\",\n    \"svn_url\": \"https://github.com/mreiferson/go-hostpool\",\n    \"homepage\": null,\n    \"size\": 98,\n    \"stargazers_count\": 0,\n    \"watchers_count\": 0,\n    \"language\": \"Go\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 0,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 3488675,\n    \"name\": \"go-httpclient\",\n    \"full_name\": \"mreiferson/go-httpclient\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": 
\"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/go-httpclient\",\n    \"description\": \"a Go HTTP client with timeouts\",\n    \"fork\": false,\n    \"url\": \"https://api.github.com/repos/mreiferson/go-httpclient\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/tags\",\n   
 \"blobs_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/issues{/number}\",\n    \"pulls_url\": 
\"https://api.github.com/repos/mreiferson/go-httpclient/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/go-httpclient/releases{/id}\",\n    \"created_at\": \"2012-02-19T21:51:42Z\",\n    \"updated_at\": \"2014-07-19T16:41:18Z\",\n    \"pushed_at\": \"2014-04-25T16:53:03Z\",\n    \"git_url\": \"git://github.com/mreiferson/go-httpclient.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/go-httpclient.git\",\n    \"clone_url\": \"https://github.com/mreiferson/go-httpclient.git\",\n    \"svn_url\": \"https://github.com/mreiferson/go-httpclient\",\n    \"homepage\": \"\",\n    \"size\": 362,\n    \"stargazers_count\": 167,\n    \"watchers_count\": 167,\n    \"language\": \"Go\",\n    \"has_issues\": true,\n    \"has_downloads\": true,\n    \"has_wiki\": false,\n    \"forks_count\": 21,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 21,\n    \"open_issues\": 0,\n    \"watchers\": 167,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 3924124,\n    \"name\": \"go-install-as\",\n    \"full_name\": \"mreiferson/go-install-as\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": 
\"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/go-install-as\",\n    \"description\": \"a Go tool to install a package with a specific import path\",\n    \"fork\": false,\n    \"url\": \"https://api.github.com/repos/mreiferson/go-install-as\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/go-install-as/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/go-install-as/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/go-install-as/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/go-install-as/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/go-install-as/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/go-install-as/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/go-install-as/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/go-install-as/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/go-install-as/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/go-install-as/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/go-install-as/git/blobs{/sha}\",\n    \"git_tags_url\": 
\"https://api.github.com/repos/mreiferson/go-install-as/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/go-install-as/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/go-install-as/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/go-install-as/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/go-install-as/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/go-install-as/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/go-install-as/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/go-install-as/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/go-install-as/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/go-install-as/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/go-install-as/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/go-install-as/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/go-install-as/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/go-install-as/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/go-install-as/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/go-install-as/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/go-install-as/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/go-install-as/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/go-install-as/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/go-install-as/pulls{/number}\",\n    \"milestones_url\": 
\"https://api.github.com/repos/mreiferson/go-install-as/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/go-install-as/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/go-install-as/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/go-install-as/releases{/id}\",\n    \"created_at\": \"2012-04-04T00:17:37Z\",\n    \"updated_at\": \"2014-06-29T20:11:46Z\",\n    \"pushed_at\": \"2012-09-24T16:08:50Z\",\n    \"git_url\": \"git://github.com/mreiferson/go-install-as.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/go-install-as.git\",\n    \"clone_url\": \"https://github.com/mreiferson/go-install-as.git\",\n    \"svn_url\": \"https://github.com/mreiferson/go-install-as\",\n    \"homepage\": \"\",\n    \"size\": 107,\n    \"stargazers_count\": 53,\n    \"watchers_count\": 53,\n    \"language\": \"Shell\",\n    \"has_issues\": true,\n    \"has_downloads\": true,\n    \"has_wiki\": false,\n    \"forks_count\": 2,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 2,\n    \"open_issues\": 0,\n    \"watchers\": 53,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 4744067,\n    \"name\": \"go-notify\",\n    \"full_name\": \"mreiferson/go-notify\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": 
\"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/go-notify\",\n    \"description\": \"a Go package to observe notable events in a decoupled fashion\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/go-notify\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/go-notify/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/go-notify/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/go-notify/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/go-notify/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/go-notify/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/go-notify/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/go-notify/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/go-notify/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/go-notify/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/go-notify/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/go-notify/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/go-notify/git/tags{/sha}\",\n    \"git_refs_url\": 
\"https://api.github.com/repos/mreiferson/go-notify/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/go-notify/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/go-notify/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/go-notify/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/go-notify/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/go-notify/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/go-notify/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/go-notify/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/go-notify/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/go-notify/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/go-notify/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/go-notify/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/go-notify/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/go-notify/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/go-notify/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/go-notify/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/go-notify/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/go-notify/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/go-notify/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/go-notify/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/go-notify/notifications{?since,all,participating}\",\n    
\"labels_url\": \"https://api.github.com/repos/mreiferson/go-notify/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/go-notify/releases{/id}\",\n    \"created_at\": \"2012-06-21T20:30:43Z\",\n    \"updated_at\": \"2013-01-10T18:07:58Z\",\n    \"pushed_at\": \"2012-06-21T20:30:22Z\",\n    \"git_url\": \"git://github.com/mreiferson/go-notify.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/go-notify.git\",\n    \"clone_url\": \"https://github.com/mreiferson/go-notify.git\",\n    \"svn_url\": \"https://github.com/mreiferson/go-notify\",\n    \"homepage\": null,\n    \"size\": 68,\n    \"stargazers_count\": 1,\n    \"watchers_count\": 1,\n    \"language\": \"Go\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 1,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 12449360,\n    \"name\": \"go-nsq\",\n    \"full_name\": \"mreiferson/go-nsq\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": 
\"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/go-nsq\",\n    \"description\": \"the official Go package for NSQ\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/go-nsq\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/go-nsq/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/go-nsq/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/go-nsq/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/go-nsq/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/go-nsq/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/go-nsq/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/go-nsq/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/go-nsq/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/go-nsq/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/go-nsq/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/go-nsq/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/go-nsq/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/go-nsq/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/go-nsq/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/go-nsq/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/go-nsq/languages\",\n    \"stargazers_url\": 
\"https://api.github.com/repos/mreiferson/go-nsq/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/go-nsq/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/go-nsq/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/go-nsq/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/go-nsq/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/go-nsq/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/go-nsq/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/go-nsq/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/go-nsq/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/go-nsq/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/go-nsq/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/go-nsq/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/go-nsq/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/go-nsq/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/go-nsq/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/go-nsq/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/go-nsq/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/go-nsq/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/go-nsq/releases{/id}\",\n    \"created_at\": \"2013-08-29T02:07:54Z\",\n    \"updated_at\": \"2014-06-29T13:56:36Z\",\n    \"pushed_at\": \"2014-07-20T16:44:32Z\",\n    \"git_url\": \"git://github.com/mreiferson/go-nsq.git\",\n    \"ssh_url\": 
\"git@github.com:mreiferson/go-nsq.git\",\n    \"clone_url\": \"https://github.com/mreiferson/go-nsq.git\",\n    \"svn_url\": \"https://github.com/mreiferson/go-nsq\",\n    \"homepage\": \"\",\n    \"size\": 1783,\n    \"stargazers_count\": 0,\n    \"watchers_count\": 0,\n    \"language\": \"Go\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": false,\n    \"forks_count\": 1,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 1,\n    \"open_issues\": 0,\n    \"watchers\": 0,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 16654468,\n    \"name\": \"go-options\",\n    \"full_name\": \"mreiferson/go-options\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/go-options\",\n    \"description\": \"a Go package to structure and resolve 
options\",\n    \"fork\": false,\n    \"url\": \"https://api.github.com/repos/mreiferson/go-options\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/go-options/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/go-options/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/go-options/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/go-options/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/go-options/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/go-options/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/go-options/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/go-options/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/go-options/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/go-options/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/go-options/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/go-options/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/go-options/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/go-options/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/go-options/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/go-options/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/go-options/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/go-options/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/go-options/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/go-options/subscription\",\n    \"commits_url\": 
\"https://api.github.com/repos/mreiferson/go-options/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/go-options/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/go-options/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/go-options/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/go-options/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/go-options/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/go-options/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/go-options/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/go-options/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/go-options/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/go-options/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/go-options/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/go-options/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/go-options/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/go-options/releases{/id}\",\n    \"created_at\": \"2014-02-08T22:19:33Z\",\n    \"updated_at\": \"2014-02-16T00:39:59Z\",\n    \"pushed_at\": \"2014-02-16T00:39:58Z\",\n    \"git_url\": \"git://github.com/mreiferson/go-options.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/go-options.git\",\n    \"clone_url\": \"https://github.com/mreiferson/go-options.git\",\n    \"svn_url\": \"https://github.com/mreiferson/go-options\",\n    \"homepage\": null,\n    \"size\": 128,\n    \"stargazers_count\": 1,\n    \"watchers_count\": 1,\n    \"language\": \"Go\",\n    \"has_issues\": 
true,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 1,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 3924909,\n    \"name\": \"go-simplejson\",\n    \"full_name\": \"mreiferson/go-simplejson\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/go-simplejson\",\n    \"description\": \"a Go package to interact with arbitrary JSON\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/go-simplejson\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/keys{/key_id}\",\n    \"collaborators_url\": 
\"https://api.github.com/repos/mreiferson/go-simplejson/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/git/commits{/sha}\",\n    \"comments_url\": 
\"https://api.github.com/repos/mreiferson/go-simplejson/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/go-simplejson/releases{/id}\",\n    \"created_at\": \"2012-04-04T02:36:33Z\",\n    \"updated_at\": \"2014-06-25T01:24:01Z\",\n    \"pushed_at\": \"2014-06-30T15:13:50Z\",\n    \"git_url\": \"git://github.com/mreiferson/go-simplejson.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/go-simplejson.git\",\n    \"clone_url\": \"https://github.com/mreiferson/go-simplejson.git\",\n    \"svn_url\": \"https://github.com/mreiferson/go-simplejson\",\n    \"homepage\": \"\",\n    \"size\": 210,\n    \"stargazers_count\": 1,\n    \"watchers_count\": 1,\n    \"language\": \"Go\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": false,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 
0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 1,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 8614089,\n    \"name\": \"go-simplelog\",\n    \"full_name\": \"mreiferson/go-simplelog\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/go-simplelog\",\n    \"description\": \"a simple logging package for Go (inspired by Tornado)\",\n    \"fork\": false,\n    \"url\": \"https://api.github.com/repos/mreiferson/go-simplelog\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/collaborators{/collaborator}\",\n    \"teams_url\": 
\"https://api.github.com/repos/mreiferson/go-simplelog/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/issues/comments/{number}\",\n    
\"contents_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/go-simplelog/releases{/id}\",\n    \"created_at\": \"2013-03-06T21:53:48Z\",\n    \"updated_at\": \"2013-10-11T22:49:05Z\",\n    \"pushed_at\": \"2013-03-31T23:20:11Z\",\n    \"git_url\": \"git://github.com/mreiferson/go-simplelog.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/go-simplelog.git\",\n    \"clone_url\": \"https://github.com/mreiferson/go-simplelog.git\",\n    \"svn_url\": \"https://github.com/mreiferson/go-simplelog\",\n    \"homepage\": null,\n    \"size\": 140,\n    \"stargazers_count\": 1,\n    \"watchers_count\": 1,\n    \"language\": \"Go\",\n    \"has_issues\": true,\n    \"has_downloads\": true,\n    \"has_wiki\": false,\n    \"forks_count\": 1,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 1,\n    \"open_issues\": 0,\n    \"watchers\": 1,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 12498288,\n    \"name\": \"go-snappystream\",\n    \"full_name\": 
\"mreiferson/go-snappystream\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/go-snappystream\",\n    \"description\": \"a Go package for framed snappy streams\",\n    \"fork\": false,\n    \"url\": \"https://api.github.com/repos/mreiferson/go-snappystream\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/hooks\",\n    \"issue_events_url\": 
\"https://api.github.com/repos/mreiferson/go-snappystream/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/contents/{+path}\",\n    \"compare_url\": 
\"https://api.github.com/repos/mreiferson/go-snappystream/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/go-snappystream/releases{/id}\",\n    \"created_at\": \"2013-08-31T00:41:11Z\",\n    \"updated_at\": \"2014-07-20T07:52:45Z\",\n    \"pushed_at\": \"2013-09-17T21:00:14Z\",\n    \"git_url\": \"git://github.com/mreiferson/go-snappystream.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/go-snappystream.git\",\n    \"clone_url\": \"https://github.com/mreiferson/go-snappystream.git\",\n    \"svn_url\": \"https://github.com/mreiferson/go-snappystream\",\n    \"homepage\": null,\n    \"size\": 184,\n    \"stargazers_count\": 21,\n    \"watchers_count\": 21,\n    \"language\": \"Go\",\n    \"has_issues\": true,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 1,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 1,\n    \"open_issues\": 0,\n    \"watchers\": 21,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 5183238,\n    \"name\": \"go-stat\",\n    \"full_name\": \"mreiferson/go-stat\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 
187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/go-stat\",\n    \"description\": \"performant instrumentation/profiling for Go\",\n    \"fork\": false,\n    \"url\": \"https://api.github.com/repos/mreiferson/go-stat\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/go-stat/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/go-stat/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/go-stat/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/go-stat/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/go-stat/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/go-stat/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/go-stat/events\",\n    \"assignees_url\": 
\"https://api.github.com/repos/mreiferson/go-stat/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/go-stat/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/go-stat/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/go-stat/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/go-stat/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/go-stat/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/go-stat/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/go-stat/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/go-stat/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/go-stat/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/go-stat/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/go-stat/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/go-stat/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/go-stat/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/go-stat/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/go-stat/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/go-stat/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/go-stat/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/go-stat/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/go-stat/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/go-stat/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/go-stat/downloads\",\n    
\"issues_url\": \"https://api.github.com/repos/mreiferson/go-stat/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/go-stat/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/go-stat/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/go-stat/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/go-stat/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/go-stat/releases{/id}\",\n    \"created_at\": \"2012-07-25T19:03:42Z\",\n    \"updated_at\": \"2014-01-10T04:39:14Z\",\n    \"pushed_at\": \"2012-07-25T19:04:37Z\",\n    \"git_url\": \"git://github.com/mreiferson/go-stat.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/go-stat.git\",\n    \"clone_url\": \"https://github.com/mreiferson/go-stat.git\",\n    \"svn_url\": \"https://github.com/mreiferson/go-stat\",\n    \"homepage\": null,\n    \"size\": 96,\n    \"stargazers_count\": 1,\n    \"watchers_count\": 1,\n    \"language\": \"Go\",\n    \"has_issues\": true,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 1,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 8662365,\n    \"name\": \"go-ujson\",\n    \"full_name\": \"mreiferson/go-ujson\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": 
\"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/go-ujson\",\n    \"description\": \"a pure Go port of ultrajson\",\n    \"fork\": false,\n    \"url\": \"https://api.github.com/repos/mreiferson/go-ujson\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/go-ujson/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/go-ujson/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/go-ujson/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/go-ujson/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/go-ujson/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/go-ujson/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/go-ujson/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/go-ujson/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/go-ujson/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/go-ujson/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/go-ujson/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/go-ujson/git/tags{/sha}\",\n    \"git_refs_url\": 
\"https://api.github.com/repos/mreiferson/go-ujson/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/go-ujson/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/go-ujson/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/go-ujson/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/go-ujson/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/go-ujson/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/go-ujson/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/go-ujson/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/go-ujson/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/go-ujson/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/go-ujson/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/go-ujson/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/go-ujson/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/go-ujson/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/go-ujson/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/go-ujson/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/go-ujson/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/go-ujson/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/go-ujson/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/go-ujson/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/go-ujson/notifications{?since,all,participating}\",\n    \"labels_url\": 
\"https://api.github.com/repos/mreiferson/go-ujson/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/go-ujson/releases{/id}\",\n    \"created_at\": \"2013-03-08T23:57:54Z\",\n    \"updated_at\": \"2014-06-26T01:50:40Z\",\n    \"pushed_at\": \"2013-11-10T19:49:16Z\",\n    \"git_url\": \"git://github.com/mreiferson/go-ujson.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/go-ujson.git\",\n    \"clone_url\": \"https://github.com/mreiferson/go-ujson.git\",\n    \"svn_url\": \"https://github.com/mreiferson/go-ujson\",\n    \"homepage\": \"\",\n    \"size\": 140,\n    \"stargazers_count\": 31,\n    \"watchers_count\": 31,\n    \"language\": \"Go\",\n    \"has_issues\": true,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 8,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 8,\n    \"open_issues\": 0,\n    \"watchers\": 31,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 12815437,\n    \"name\": \"godep\",\n    \"full_name\": \"mreiferson/godep\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      
\"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/godep\",\n    \"description\": \"dependency tool for go\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/godep\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/godep/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/godep/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/godep/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/godep/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/godep/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/godep/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/godep/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/godep/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/godep/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/godep/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/godep/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/godep/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/godep/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/godep/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/godep/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/godep/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/godep/stargazers\",\n    \"contributors_url\": 
\"https://api.github.com/repos/mreiferson/godep/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/godep/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/godep/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/godep/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/godep/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/godep/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/godep/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/godep/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/godep/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/godep/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/godep/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/godep/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/godep/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/godep/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/godep/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/godep/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/godep/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/godep/releases{/id}\",\n    \"created_at\": \"2013-09-13T17:36:10Z\",\n    \"updated_at\": \"2014-03-21T02:53:20Z\",\n    \"pushed_at\": \"2014-01-05T18:07:02Z\",\n    \"git_url\": \"git://github.com/mreiferson/godep.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/godep.git\",\n    \"clone_url\": \"https://github.com/mreiferson/godep.git\",\n    \"svn_url\": 
\"https://github.com/mreiferson/godep\",\n    \"homepage\": \"http://godoc.org/github.com/kr/godep\",\n    \"size\": 196,\n    \"stargazers_count\": 0,\n    \"watchers_count\": 0,\n    \"language\": \"Go\",\n    \"has_issues\": false,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 0,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 2862096,\n    \"name\": \"hajiworld\",\n    \"full_name\": \"mreiferson/hajiworld\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/hajiworld\",\n    \"description\": \"super mario clone (1999)\",\n    \"fork\": false,\n    \"url\": \"https://api.github.com/repos/mreiferson/hajiworld\",\n    \"forks_url\": 
\"https://api.github.com/repos/mreiferson/hajiworld/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/hajiworld/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/hajiworld/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/hajiworld/teams\",\n    \"hooks_url\": \"https://api.github.com/repos/mreiferson/hajiworld/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/hajiworld/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/hajiworld/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/hajiworld/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/hajiworld/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/hajiworld/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/hajiworld/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/hajiworld/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/hajiworld/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/hajiworld/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/hajiworld/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/hajiworld/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/hajiworld/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/hajiworld/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/hajiworld/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/hajiworld/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/hajiworld/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/hajiworld/git/commits{/sha}\",\n   
 \"comments_url\": \"https://api.github.com/repos/mreiferson/hajiworld/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/hajiworld/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/hajiworld/contents/{+path}\",\n    \"compare_url\": \"https://api.github.com/repos/mreiferson/hajiworld/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/hajiworld/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/hajiworld/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/hajiworld/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/hajiworld/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/hajiworld/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/hajiworld/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/hajiworld/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/hajiworld/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/hajiworld/releases{/id}\",\n    \"created_at\": \"2011-11-27T18:05:02Z\",\n    \"updated_at\": \"2014-01-08T14:10:43Z\",\n    \"pushed_at\": \"2011-11-29T02:49:49Z\",\n    \"git_url\": \"git://github.com/mreiferson/hajiworld.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/hajiworld.git\",\n    \"clone_url\": \"https://github.com/mreiferson/hajiworld.git\",\n    \"svn_url\": \"https://github.com/mreiferson/hajiworld\",\n    \"homepage\": \"\",\n    \"size\": 27872,\n    \"stargazers_count\": 1,\n    \"watchers_count\": 1,\n    \"language\": \"C++\",\n    \"has_issues\": true,\n    \"has_downloads\": true,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n  
  \"watchers\": 1,\n    \"default_branch\": \"master\"\n  },\n  {\n    \"id\": 14853562,\n    \"name\": \"homebrew\",\n    \"full_name\": \"mreiferson/homebrew\",\n    \"owner\": {\n      \"login\": \"mreiferson\",\n      \"id\": 187441,\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/187441?\",\n      \"gravatar_id\": \"dd56a8e1de66aeedb987397511f830e7\",\n      \"url\": \"https://api.github.com/users/mreiferson\",\n      \"html_url\": \"https://github.com/mreiferson\",\n      \"followers_url\": \"https://api.github.com/users/mreiferson/followers\",\n      \"following_url\": \"https://api.github.com/users/mreiferson/following{/other_user}\",\n      \"gists_url\": \"https://api.github.com/users/mreiferson/gists{/gist_id}\",\n      \"starred_url\": \"https://api.github.com/users/mreiferson/starred{/owner}{/repo}\",\n      \"subscriptions_url\": \"https://api.github.com/users/mreiferson/subscriptions\",\n      \"organizations_url\": \"https://api.github.com/users/mreiferson/orgs\",\n      \"repos_url\": \"https://api.github.com/users/mreiferson/repos\",\n      \"events_url\": \"https://api.github.com/users/mreiferson/events{/privacy}\",\n      \"received_events_url\": \"https://api.github.com/users/mreiferson/received_events\",\n      \"type\": \"User\",\n      \"site_admin\": false\n    },\n    \"private\": false,\n    \"html_url\": \"https://github.com/mreiferson/homebrew\",\n    \"description\": \"The missing package manager for OS X.\",\n    \"fork\": true,\n    \"url\": \"https://api.github.com/repos/mreiferson/homebrew\",\n    \"forks_url\": \"https://api.github.com/repos/mreiferson/homebrew/forks\",\n    \"keys_url\": \"https://api.github.com/repos/mreiferson/homebrew/keys{/key_id}\",\n    \"collaborators_url\": \"https://api.github.com/repos/mreiferson/homebrew/collaborators{/collaborator}\",\n    \"teams_url\": \"https://api.github.com/repos/mreiferson/homebrew/teams\",\n    \"hooks_url\": 
\"https://api.github.com/repos/mreiferson/homebrew/hooks\",\n    \"issue_events_url\": \"https://api.github.com/repos/mreiferson/homebrew/issues/events{/number}\",\n    \"events_url\": \"https://api.github.com/repos/mreiferson/homebrew/events\",\n    \"assignees_url\": \"https://api.github.com/repos/mreiferson/homebrew/assignees{/user}\",\n    \"branches_url\": \"https://api.github.com/repos/mreiferson/homebrew/branches{/branch}\",\n    \"tags_url\": \"https://api.github.com/repos/mreiferson/homebrew/tags\",\n    \"blobs_url\": \"https://api.github.com/repos/mreiferson/homebrew/git/blobs{/sha}\",\n    \"git_tags_url\": \"https://api.github.com/repos/mreiferson/homebrew/git/tags{/sha}\",\n    \"git_refs_url\": \"https://api.github.com/repos/mreiferson/homebrew/git/refs{/sha}\",\n    \"trees_url\": \"https://api.github.com/repos/mreiferson/homebrew/git/trees{/sha}\",\n    \"statuses_url\": \"https://api.github.com/repos/mreiferson/homebrew/statuses/{sha}\",\n    \"languages_url\": \"https://api.github.com/repos/mreiferson/homebrew/languages\",\n    \"stargazers_url\": \"https://api.github.com/repos/mreiferson/homebrew/stargazers\",\n    \"contributors_url\": \"https://api.github.com/repos/mreiferson/homebrew/contributors\",\n    \"subscribers_url\": \"https://api.github.com/repos/mreiferson/homebrew/subscribers\",\n    \"subscription_url\": \"https://api.github.com/repos/mreiferson/homebrew/subscription\",\n    \"commits_url\": \"https://api.github.com/repos/mreiferson/homebrew/commits{/sha}\",\n    \"git_commits_url\": \"https://api.github.com/repos/mreiferson/homebrew/git/commits{/sha}\",\n    \"comments_url\": \"https://api.github.com/repos/mreiferson/homebrew/comments{/number}\",\n    \"issue_comment_url\": \"https://api.github.com/repos/mreiferson/homebrew/issues/comments/{number}\",\n    \"contents_url\": \"https://api.github.com/repos/mreiferson/homebrew/contents/{+path}\",\n    \"compare_url\": 
\"https://api.github.com/repos/mreiferson/homebrew/compare/{base}...{head}\",\n    \"merges_url\": \"https://api.github.com/repos/mreiferson/homebrew/merges\",\n    \"archive_url\": \"https://api.github.com/repos/mreiferson/homebrew/{archive_format}{/ref}\",\n    \"downloads_url\": \"https://api.github.com/repos/mreiferson/homebrew/downloads\",\n    \"issues_url\": \"https://api.github.com/repos/mreiferson/homebrew/issues{/number}\",\n    \"pulls_url\": \"https://api.github.com/repos/mreiferson/homebrew/pulls{/number}\",\n    \"milestones_url\": \"https://api.github.com/repos/mreiferson/homebrew/milestones{/number}\",\n    \"notifications_url\": \"https://api.github.com/repos/mreiferson/homebrew/notifications{?since,all,participating}\",\n    \"labels_url\": \"https://api.github.com/repos/mreiferson/homebrew/labels{/name}\",\n    \"releases_url\": \"https://api.github.com/repos/mreiferson/homebrew/releases{/id}\",\n    \"created_at\": \"2013-12-02T05:20:40Z\",\n    \"updated_at\": \"2014-02-17T17:19:19Z\",\n    \"pushed_at\": \"2014-02-17T17:06:03Z\",\n    \"git_url\": \"git://github.com/mreiferson/homebrew.git\",\n    \"ssh_url\": \"git@github.com:mreiferson/homebrew.git\",\n    \"clone_url\": \"https://github.com/mreiferson/homebrew.git\",\n    \"svn_url\": \"https://github.com/mreiferson/homebrew\",\n    \"homepage\": \"http://brew.sh\",\n    \"size\": 29725,\n    \"stargazers_count\": 0,\n    \"watchers_count\": 0,\n    \"language\": \"Ruby\",\n    \"has_issues\": false,\n    \"has_downloads\": false,\n    \"has_wiki\": true,\n    \"forks_count\": 0,\n    \"mirror_url\": null,\n    \"open_issues_count\": 0,\n    \"forks\": 0,\n    \"open_issues\": 0,\n    \"watchers\": 0,\n    \"default_branch\": \"master\"\n  }\n]\n`)\n"
  },
  {
    "path": "vendor/github.com/mreiferson/go-snappystream/reader.go",
    "content": "package snappystream\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"hash/crc32\"\n\t\"io\"\n\t\"io/ioutil\"\n\n\t\"code.google.com/p/snappy-go/snappy\"\n)\n\n// errMssingStreamID is returned from a reader when the source stream does not\n// begin with a stream identifier block (4.1 Stream identifier).  Its occurance\n// signifies that the source byte stream is not snappy framed.\nvar errMissingStreamID = fmt.Errorf(\"missing stream identifier\")\n\ntype reader struct {\n\treader io.Reader\n\n\terr error\n\n\tseenStreamID   bool\n\tverifyChecksum bool\n\n\tbuf bytes.Buffer\n\thdr []byte\n\tsrc []byte\n\tdst []byte\n}\n\n// NewReader returns an io.Reader interface to the snappy framed stream format.\n//\n// It transparently handles reading the stream identifier (but does not proxy this\n// to the caller), decompresses blocks, and (optionally) validates checksums.\n//\n// Internally, three buffers are maintained.  The first two are for reading\n// off the wrapped io.Reader and for holding the decompressed block (both are grown\n// automatically and re-used and will never exceed the largest block size, 65536). The\n// last buffer contains the *unread* decompressed bytes (and can grow indefinitely).\n//\n// The second param determines whether or not the reader will verify block\n// checksums and can be enabled/disabled with the constants VerifyChecksum and SkipVerifyChecksum\n//\n// For each Read, the returned length will be up to the lesser of len(b) or 65536\n// decompressed bytes, regardless of the length of *compressed* bytes read\n// from the wrapped io.Reader.\nfunc NewReader(r io.Reader, verifyChecksum bool) io.Reader {\n\treturn &reader{\n\t\treader: r,\n\n\t\tverifyChecksum: verifyChecksum,\n\n\t\thdr: make([]byte, 4),\n\t\tsrc: make([]byte, 4096),\n\t\tdst: make([]byte, 4096),\n\t}\n}\n\n// WriteTo implements the io.WriterTo interface used by io.Copy.  It writes\n// decoded data from the underlying reader to w.  
WriteTo returns the number of\n// bytes written along with any error encountered.\nfunc (r *reader) WriteTo(w io.Writer) (int64, error) {\n\tif r.err != nil {\n\t\treturn 0, r.err\n\t}\n\n\tn, err := r.buf.WriteTo(w)\n\tif err != nil {\n\t\t// r.err doesn't need to be set because a write error occurred and the\n\t\t// stream hasn't been corrupted.\n\t\treturn n, err\n\t}\n\n\t// pass a bufferFallbackWriter to nextFrame so that write errors may be\n\t// recovered from, allowing the unwritten stream to be read successfully.\n\twfallback := &bufferFallbackWriter{\n\t\tw:   w,\n\t\tbuf: &r.buf,\n\t}\n\tfor {\n\t\tvar m int\n\t\tm, err = r.nextFrame(wfallback)\n\t\tif wfallback.writerErr != nil && err == nil {\n\t\t\t// a partial write was made before an error occurred and not all m\n\t\t\t// bytes were writen to w.  but decoded bytes were successfully\n\t\t\t// buffered and reading can resume later.\n\t\t\tn += wfallback.n\n\t\t\treturn n, wfallback.writerErr\n\t\t}\n\t\tn += int64(m)\n\t\tif err == io.EOF {\n\t\t\treturn n, nil\n\t\t}\n\t\tif err != nil {\n\t\t\tr.err = err\n\t\t\treturn n, err\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}\n\n// bufferFallbackWriter writes to an underlying io.Writer until an error\n// occurs.  If a error occurs in the underlying io.Writer the value is saved\n// for later inspection while the bufferFallbackWriter silently starts\n// buffering all data written to it. From the caller's perspective\n// bufferFallbackWriter has the same Write behavior has a bytes.Buffer.\n//\n// bufferFallbackWriter is useful for the reader.WriteTo method because it\n// allows internal decoding routines to avoid interruption (and subsequent\n// stream corruption) due to writing errors.\ntype bufferFallbackWriter struct {\n\tw         io.Writer\n\tbuf       *bytes.Buffer\n\tn         int64 // number of bytes successfully written to w\n\twriterErr error // any error that ocurred writing to w\n}\n\n// Write attempts to write b to the underlying io.Writer.  
If the underlying\n// writer fails or has failed previously unwritten bytes are buffered\n// internally.  Write never returns an error but may panic with\n// bytes.ErrTooLarge if the buffer grows too large.\nfunc (w *bufferFallbackWriter) Write(b []byte) (int, error) {\n\tif w.writerErr != nil {\n\t\treturn w.buf.Write(b)\n\t}\n\tn, err := w.w.Write(b)\n\tw.n += int64(n)\n\tif err != nil {\n\t\t// begin buffering input. bytes.Buffer does not return errors and so we\n\t\t// do not need complex error handling here.\n\t\tw.writerErr = err\n\t\tw.Write(b[n:])\n\t\treturn len(b), nil\n\t}\n\treturn n, nil\n}\n\nfunc (r *reader) read(b []byte) (int, error) {\n\tn, err := r.buf.Read(b)\n\tr.err = err\n\treturn n, err\n}\n\nfunc (r *reader) Read(b []byte) (int, error) {\n\tif r.err != nil {\n\t\treturn 0, r.err\n\t}\n\n\tif r.buf.Len() < len(b) {\n\t\t_, r.err = r.nextFrame(&r.buf)\n\t\tif r.err == io.EOF {\n\t\t\t// fill b with any remaining bytes in the buffer.\n\t\t\treturn r.read(b)\n\t\t}\n\t\tif r.err != nil {\n\t\t\treturn 0, r.err\n\t\t}\n\t}\n\n\treturn r.read(b)\n}\n\nfunc (r *reader) nextFrame(w io.Writer) (int, error) {\n\tfor {\n\t\t// read the 4-byte snappy frame header\n\t\t_, err := io.ReadFull(r.reader, r.hdr)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\t// a stream identifier may appear anywhere and contains no information.\n\t\t// it must appear at the beginning of the stream.  
when found, validate\n\t\t// it and continue to the next block.\n\t\tif r.hdr[0] == blockStreamIdentifier {\n\t\t\terr := r.readStreamID()\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tr.seenStreamID = true\n\t\t\tcontinue\n\t\t}\n\t\tif !r.seenStreamID {\n\t\t\treturn 0, errMissingStreamID\n\t\t}\n\n\t\tswitch typ := r.hdr[0]; {\n\t\tcase typ == blockCompressed || typ == blockUncompressed:\n\t\t\treturn r.decodeBlock(w)\n\t\tcase typ == blockPadding || (0x80 <= typ && typ <= 0xfd):\n\t\t\t// skip blocks whose data must not be inspected (4.4 Padding, and 4.6\n\t\t\t// Reserved skippable chunks).\n\t\t\terr := r.discardBlock()\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tcontinue\n\t\tdefault:\n\t\t\t// typ must be unskippable range 0x02-0x7f.  Read the block in full\n\t\t\t// and return an error (4.5 Reserved unskippable chunks).\n\t\t\terr = r.discardBlock()\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\treturn 0, fmt.Errorf(\"unrecognized unskippable frame %#x\", r.hdr[0])\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}\n\n// decodeDataBlock assumes r.hdr[0] to be either blockCompressed or\n// blockUncompressed.\nfunc (r *reader) decodeBlock(w io.Writer) (int, error) {\n\t// read compressed block data and determine if uncompressed data is too\n\t// large.\n\tbuf, err := r.readBlock()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdeclen := len(buf[4:])\n\tif r.hdr[0] == blockCompressed {\n\t\tdeclen, err = snappy.DecodedLen(buf[4:])\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\tif declen > MaxBlockSize {\n\t\treturn 0, fmt.Errorf(\"decoded block data too large %d > %d\", declen, MaxBlockSize)\n\t}\n\n\t// decode data and verify its integrity using the little-endian crc32\n\t// preceding encoded data\n\tcrc32le, blockdata := buf[:4], buf[4:]\n\tif r.hdr[0] == blockCompressed {\n\t\tr.dst, err = snappy.Decode(r.dst, blockdata)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tblockdata = r.dst\n\t}\n\tif 
r.verifyChecksum {\n\t\tchecksum := unmaskChecksum(uint32(crc32le[0]) | uint32(crc32le[1])<<8 | uint32(crc32le[2])<<16 | uint32(crc32le[3])<<24)\n\t\tactualChecksum := crc32.Checksum(blockdata, crcTable)\n\t\tif checksum != actualChecksum {\n\t\t\treturn 0, fmt.Errorf(\"checksum does not match %x != %x\", checksum, actualChecksum)\n\t\t}\n\t}\n\treturn w.Write(blockdata)\n}\n\nfunc (r *reader) readStreamID() error {\n\t// the length of the block is fixed so don't decode it from the header.\n\tif !bytes.Equal(r.hdr, streamID[:4]) {\n\t\treturn fmt.Errorf(\"invalid stream identifier length\")\n\t}\n\n\t// read the identifier block data \"sNaPpY\"\n\tblock := r.src[:6]\n\t_, err := noeof(io.ReadFull(r.reader, block))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !bytes.Equal(block, streamID[4:]) {\n\t\treturn fmt.Errorf(\"invalid stream identifier block\")\n\t}\n\treturn nil\n}\n\nfunc (r *reader) discardBlock() error {\n\tlength := uint64(decodeLength(r.hdr[1:]))\n\t_, err := noeof64(io.CopyN(ioutil.Discard, r.reader, int64(length)))\n\treturn err\n}\n\nfunc (r *reader) readBlock() ([]byte, error) {\n\t// check bounds on encoded length (+4 for checksum)\n\tlength := decodeLength(r.hdr[1:])\n\tif length > (maxEncodedBlockSize + 4) {\n\t\treturn nil, fmt.Errorf(\"encoded block data too large %d > %d\", length, (maxEncodedBlockSize + 4))\n\t}\n\n\tif int(length) > len(r.src) {\n\t\tr.src = make([]byte, length)\n\t}\n\n\tbuf := r.src[:length]\n\t_, err := noeof(io.ReadFull(r.reader, buf))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf, nil\n}\n\n// decodeLength decodes a 24-bit (3-byte) little-endian length from b.\nfunc decodeLength(b []byte) uint32 {\n\treturn uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16\n}\n\nfunc unmaskChecksum(c uint32) uint32 {\n\tx := c - 0xa282ead8\n\treturn ((x >> 17) | (x << 15))\n}\n\n// noeof is used after reads in situations where EOF signifies invalid\n// formatting or corruption.\nfunc noeof(n int, err error) (int, 
error) {\n\tif err == io.EOF {\n\t\treturn n, io.ErrUnexpectedEOF\n\t}\n\treturn n, err\n}\n\n// noeof64 is used after long reads (e.g. io.Copy) in situations where io.EOF\n// signifies invalid formatting or corruption.\nfunc noeof64(n int64, err error) (int64, error) {\n\tif err == io.EOF {\n\t\treturn n, io.ErrUnexpectedEOF\n\t}\n\treturn n, err\n}\n"
  },
  {
    "path": "vendor/github.com/mreiferson/go-snappystream/reader_test.go",
    "content": "package snappystream\n\nimport (\n\t\"bytes\"\n\t\"crypto/rand\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"code.google.com/p/snappy-go/snappy\"\n)\n\n// This test checks that padding and reserved skippable blocks are ignored by\n// the reader.\nfunc TestReader_skippable(t *testing.T) {\n\tvar buf bytes.Buffer\n\t// write some blocks with injected padding/skippable blocks\n\tw := NewWriter(&buf)\n\twrite := func(p []byte) (int, error) {\n\t\treturn w.Write(p)\n\t}\n\twritepad := func(b byte, n int) (int, error) {\n\t\treturn buf.Write(opaqueChunk(b, n))\n\t}\n\t_, err := write([]byte(\"hello\"))\n\tif err != nil {\n\t\tt.Fatalf(\"write error: %v\", err)\n\t}\n\t_, err = writepad(0xfe, 100) // normal padding\n\tif err != nil {\n\t\tt.Fatalf(\"write error: %v\", err)\n\t}\n\t_, err = write([]byte(\" \"))\n\tif err != nil {\n\t\tt.Fatalf(\"write error: %v\", err)\n\t}\n\t_, err = writepad(0xa0, 100) // reserved skippable block\n\tif err != nil {\n\t\tt.Fatalf(\"write error: %v\", err)\n\t}\n\t_, err = writepad(0xfe, MaxBlockSize) // normal padding\n\tif err != nil {\n\t\tt.Fatalf(\"write error: %v\", err)\n\t}\n\t_, err = write([]byte(\"padding\"))\n\tif err != nil {\n\t\tt.Fatalf(\"write error: %v\", err)\n\t}\n\n\tp, err := ioutil.ReadAll(NewReader(&buf, true))\n\tif err != nil {\n\t\tt.Fatalf(\"read error: %v\", err)\n\t}\n\tif string(p) != \"hello padding\" {\n\t\tt.Fatalf(\"read: unexpected content %q\", string(p))\n\t}\n}\n\n// This test checks that reserved unskippable blocks are cause decoder errors.\nfunc TestReader_unskippable(t *testing.T) {\n\tvar buf bytes.Buffer\n\t// write some blocks with injected padding/skippable blocks\n\tw := NewWriter(&buf)\n\twrite := func(p []byte) (int, error) {\n\t\treturn w.Write(p)\n\t}\n\twritepad := func(b byte, n int) (int, error) {\n\t\treturn buf.Write(opaqueChunk(b, n))\n\t}\n\t_, err := write([]byte(\"unskippable\"))\n\tif err != nil {\n\t\tt.Fatalf(\"write error: %v\", 
err)\n\t}\n\t_, err = writepad(0x50, 100) // unskippable reserved block\n\tif err != nil {\n\t\tt.Fatalf(\"write error: %v\", err)\n\t}\n\t_, err = write([]byte(\" blocks\"))\n\tif err != nil {\n\t\tt.Fatalf(\"write error: %v\", err)\n\t}\n\n\t_, err = ioutil.ReadAll(NewReader(&buf, true))\n\tif err == nil {\n\t\tt.Fatalf(\"read success\")\n\t}\n}\n\nfunc TestReaderStreamID(t *testing.T) {\n\tdata := []byte(\"a snappy-framed data stream\")\n\tvar buf bytes.Buffer\n\tw := NewWriter(&buf)\n\t_, err := w.Write(data)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tstream := buf.Bytes()\n\n\t// sanity check: the stream can be decoded and starts with streamID\n\tr := NewReader(bytes.NewReader(stream), true)\n\t_, err = ioutil.ReadAll(r)\n\tif err != nil {\n\t\tt.Fatalf(\"read: %v\", err)\n\t}\n\tif !bytes.HasPrefix(stream, streamID) {\n\t\tt.Fatal(\"missing stream id\")\n\t}\n\n\t// streamNoID is valid except for a missing the streamID block\n\tstreamNoID := bytes.TrimPrefix(stream, streamID)\n\tr = NewReader(bytes.NewReader(streamNoID), true)\n\tn, err := r.Read(make([]byte, 1))\n\tif err == nil {\n\t\tt.Fatalf(\"read: expected an error reading input missing a stream identifier block\")\n\t}\n\tif n != 0 {\n\t\tt.Fatalf(\"read: read non-zero number of bytes %d\", n)\n\t}\n\tn, err = r.Read(make([]byte, 1))\n\tif err == nil {\n\t\tt.Fatalf(\"read: successful read after missing stream id error\")\n\t}\n\tif n != 0 {\n\t\tt.Fatalf(\"read: read non-zero number of bytes %d after missing stream id error\", n)\n\t}\n}\n\n// This test validates the reader successfully decods a padding of maximal\n// size, 2^24 - 1.\nfunc TestReader_maxPad(t *testing.T) {\n\tbuf := bytes.NewReader(bytes.Join([][]byte{\n\t\tstreamID,\n\t\tcompressedChunk(t, []byte(\"a maximal padding chunk\")),\n\t\topaqueChunk(0xfe, (1<<24)-1), // normal padding\n\t\tcompressedChunk(t, []byte(\" is decoded successfully\")),\n\t}, nil))\n\tr := NewReader(buf, true)\n\tp, err := ioutil.ReadAll(r)\n\tif err != nil 
{\n\t\tt.Fatalf(\"read error: %v\", err)\n\t}\n\tif string(p) != \"a maximal padding chunk is decoded successfully\" {\n\t\tt.Fatalf(\"read: unexpected content %q\", string(p))\n\t}\n}\n\n// This test validates the reader successfully decodes a skippable chunk of\n// maximal size, 2^24 - 1.\nfunc TestReader_maxSkippable(t *testing.T) {\n\tbuf := bytes.NewReader(bytes.Join([][]byte{\n\t\tstreamID,\n\t\tcompressedChunk(t, []byte(\"a maximal skippable chunk\")),\n\t\topaqueChunk(0xce, (1<<24)-1), // reserved skippable chunk\n\t\tcompressedChunk(t, []byte(\" is decoded successfully\")),\n\t}, nil))\n\tr := NewReader(buf, true)\n\tp, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\tt.Fatalf(\"read error: %v\", err)\n\t}\n\tif string(p) != \"a maximal skippable chunk is decoded successfully\" {\n\t\tt.Fatalf(\"read: unexpected content %q\", string(p))\n\t}\n}\n\n// TestReader_maxBlock validates bounds checking on encoded and decoded data\n// (4.2 Compressed Data).\nfunc TestReader_maxBlock(t *testing.T) {\n\t// decompressing a block with compressed length greater than MaxBlockSize\n\t// should succeed.\n\tbuf := bytes.NewReader(bytes.Join([][]byte{\n\t\tstreamID,\n\t\tcompressedChunkGreaterN(t, MaxBlockSize),\n\t}, nil))\n\tr := NewReader(buf, true)\n\tb, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(b) != MaxBlockSize {\n\t\tt.Fatalf(\"bad read (%d bytes)\", len(b))\n\t}\n\n\t// decompressing should fail if the block with decompressed length greater\n\t// than MaxBlockSize.\n\tbuf = bytes.NewReader(bytes.Join([][]byte{\n\t\tstreamID,\n\t\tcompressedChunk(t, make([]byte, MaxBlockSize+1)),\n\t}, nil))\n\tr = NewReader(buf, true)\n\tb, err = ioutil.ReadAll(r)\n\tif err == nil {\n\t\tt.Fatal(\"unexpected success\")\n\t}\n\tif len(b) > 0 {\n\t\tt.Fatalf(\"unexpected read %q\", b)\n\t}\n}\n\n// This test validates the reader's behavior encountering unskippable chunks of\n// maximal size, 2^24 - 1.  
The desired error to in this case is one reporting\n// an unskippable chunk, not a length error.\nfunc TestReader_maxUnskippable(t *testing.T) {\n\t// the first block should be decoded successfully.\n\tprefix := \"a maximal unskippable chunk\"\n\tbuf := bytes.NewReader(bytes.Join([][]byte{\n\t\tstreamID,\n\t\tcompressedChunk(t, []byte(prefix)),\n\t\topaqueChunk(0x03, (1<<24)-1), // low end of the unskippable range\n\t\tcompressedChunk(t, []byte(\" failure must be reported as such\")),\n\t}, nil))\n\tp := make([]byte, len(prefix))\n\tr := NewReader(buf, true)\n\tn, err := r.Read(p)\n\tif err != nil {\n\t\tt.Fatalf(\"read error: %v\", err)\n\t}\n\tif n != len(p) {\n\t\tt.Fatalf(\"read: short read %d\", n)\n\t}\n\tif string(p) != prefix {\n\t\tt.Fatalf(\"read: bad value %q\", p)\n\t}\n\n\tn, err = r.Read(p)\n\tif err == nil {\n\t\tt.Fatalf(\"read: expected error\")\n\t}\n\tif n > 0 {\n\t\tt.Fatalf(\"read: read %d more bytes than expected\", n)\n\t}\n\tif !strings.Contains(err.Error(), \"unskippable\") {\n\t\tt.Fatalf(\"read error: %v\", err)\n\t}\n}\n\n// This test validates errors returned when data blocks exceed size limits.\nfunc TestReader_blockTooLarge(t *testing.T) {\n\t// the compressed chunk size is within the allowed encoding size\n\t// (maxEncodedBlockSize). but the uncompressed data is larger than allowed.\n\tbadstream := bytes.Join([][]byte{\n\t\tstreamID,\n\t\tcompressedChunk(t, make([]byte, (1<<24)-5)),\n\t}, nil)\n\tr := NewReader(bytes.NewBuffer(badstream), true)\n\tp := make([]byte, 1)\n\tn, err := r.Read(p)\n\tif err == nil {\n\t\tt.Fatalf(\"read: expected error\")\n\t}\n\tif n != 0 {\n\t\tt.Fatalf(\"read: read data from the stream\")\n\t}\n\n\t// the compressed chunk size is within the allowed encoding size\n\t// (maxEncodedBlockSize). 
but the uncompressed data is larger than allowed.\n\tbadstream = bytes.Join([][]byte{\n\t\tstreamID,\n\t\tuncompressedChunk(t, make([]byte, (1<<24)-5)),\n\t}, nil)\n\tr = NewReader(bytes.NewBuffer(badstream), true)\n\tp = make([]byte, 1)\n\tn, err = r.Read(p)\n\tif err == nil {\n\t\tt.Fatalf(\"read: expected error\")\n\t}\n\tif n != 0 {\n\t\tt.Fatalf(\"read: read data from the stream\")\n\t}\n}\n\n// This test validates the reader's handling of corrupt chunks.\nfunc TestReader_corruption(t *testing.T) {\n\t// corruptID is a corrupt stream identifier\n\tcorruptID := append([]byte(nil), streamID...)\n\tcorruptID = bytes.Replace(streamID, []byte(\"p\"), []byte(\"P\"), -1) // corrupt \"sNaPpY\" data\n\tbadstream := corruptID\n\n\tr := NewReader(bytes.NewBuffer(badstream), true)\n\tp := make([]byte, 1)\n\tn, err := r.Read(p)\n\tif err == nil {\n\t\tt.Fatalf(\"read: expected error\")\n\t}\n\tif err == io.EOF {\n\t\tt.Fatalf(\"read: unexpected eof\")\n\t}\n\tif n != 0 {\n\t\tt.Fatalf(\"read: read data from the stream\")\n\t}\n\n\tcorruptID = append([]byte(nil), streamID...) 
// corrupt the length\n\tcorruptID[1] = 0x00\n\tbadstream = corruptID\n\n\tr = NewReader(bytes.NewBuffer(badstream), true)\n\tp = make([]byte, 1)\n\tn, err = r.Read(p)\n\tif err == nil {\n\t\tt.Fatalf(\"read: expected error\")\n\t}\n\tif err == io.EOF {\n\t\tt.Fatalf(\"read: unexpected eof\")\n\t}\n\tif n != 0 {\n\t\tt.Fatalf(\"read: read data from the stream\")\n\t}\n\n\t// chunk is a valid compressed block\n\tchunk := compressedChunk(t, []byte(\"a data block\"))\n\n\t// corrupt is a corrupt chunk\n\tcorrupt := append([]byte(nil), chunk...)\n\tcopy(corrupt[8:], make([]byte, 10)) // corrupt snappy-encoded data\n\tbadstream = bytes.Join([][]byte{\n\t\tstreamID,\n\t\tcorrupt,\n\t}, nil)\n\n\tr = NewReader(bytes.NewBuffer(badstream), true)\n\tp = make([]byte, 1)\n\tn, err = r.Read(p)\n\tif err == nil {\n\t\tt.Fatalf(\"read: expected error\")\n\t}\n\tif err == io.EOF {\n\t\tt.Fatalf(\"read: unexpected eof\")\n\t}\n\tif n != 0 {\n\t\tt.Fatalf(\"read: read data from the stream\")\n\t}\n\n\tcorrupt = append([]byte(nil), chunk...)\n\tcopy(corrupt[4:8], make([]byte, 4)) // crc checksum failure\n\tbadstream = bytes.Join([][]byte{\n\t\tstreamID,\n\t\tcorrupt,\n\t}, nil)\n\n\tr = NewReader(bytes.NewBuffer(badstream), true)\n\tp = make([]byte, 1)\n\tn, err = r.Read(p)\n\tif err == nil {\n\t\tt.Fatalf(\"read: expected error\")\n\t}\n\tif err == io.EOF {\n\t\tt.Fatalf(\"read: unexpected eof\")\n\t}\n\tif n != 0 {\n\t\tt.Fatalf(\"read: read data from the stream\")\n\t}\n}\n\n// This test ensures that reader returns io.ErrUnexpectedEOF at the appropriate\n// times. 
io.EOF must be reserved for the case when all data has been\n// successfully decoded.\nfunc TestReader_unexpectedEOF(t *testing.T) {\n\tvar decodeBuffer [64 << 10]byte\n\n\tfor _, test := range [][]byte{\n\t\t// truncated streamIDs\n\t\tstreamID[:4],\n\t\tstreamID[:len(streamID)-1],\n\n\t\t// truncated data blocks\n\t\tbytes.Join([][]byte{\n\t\t\tstreamID,\n\t\t\tcompressedChunk(t, bytes.Repeat([]byte(\"abc\"), 100))[:2],\n\t\t}, nil),\n\t\tbytes.Join([][]byte{\n\t\t\tstreamID,\n\t\t\tcompressedChunk(t, bytes.Repeat([]byte(\"abc\"), 100))[:7],\n\t\t}, nil),\n\n\t\t// truncated padding\n\t\tbytes.Join([][]byte{\n\t\t\tstreamID,\n\t\t\topaqueChunk(0xfe, 100)[:1],\n\t\t}, nil),\n\t\tbytes.Join([][]byte{\n\t\t\tstreamID,\n\t\t\topaqueChunk(0xfe, 100)[:8],\n\t\t}, nil),\n\n\t\t// truncated skippable chunk\n\t\tbytes.Join([][]byte{\n\t\t\tstreamID,\n\t\t\topaqueChunk(0xcf, 100)[:3],\n\t\t}, nil),\n\t\tbytes.Join([][]byte{\n\t\t\tstreamID,\n\t\t\topaqueChunk(0xcf, 100)[:7],\n\t\t}, nil),\n\n\t\t// truncated unskippable chunk\n\t\tbytes.Join([][]byte{\n\t\t\tstreamID,\n\t\t\topaqueChunk(0x03, 100)[:3],\n\t\t}, nil),\n\t\tbytes.Join([][]byte{\n\t\t\tstreamID,\n\t\t\topaqueChunk(0x03, 100)[:5],\n\t\t}, nil),\n\t} {\n\t\tr := NewReader(bytes.NewReader(test), true)\n\t\tn, err := r.Read(decodeBuffer[:])\n\t\tif err == nil {\n\t\t\tt.Errorf(\"read bad streamID: expected error\")\n\t\t}\n\t\tif err != io.ErrUnexpectedEOF {\n\t\t\tt.Errorf(\"read bad streamID: %v\", err)\n\t\t}\n\t\tif n != 0 {\n\t\t\tt.Errorf(\"read bad streamID: expected read length %d\", n)\n\t\t}\n\t}\n}\n\nvar errNotEnoughEntropy = fmt.Errorf(\"inadequate entropy in PRNG\")\n\n// compressedChunkGreaterN like compressedChunk produces a single, compressed,\n// snappy-framed block. 
The returned block will have decoded length at most n\n// and encoded length greater than n.\nfunc compressedChunkGreaterN(t *testing.T, n int) []byte {\n\tdecoded := make([]byte, n)\n\tvar numTries int\n\tvar encoded []byte\n\tfor len(encoded) <= n && numTries < 3 {\n\t\tnumTries++\n\t\tnrd, err := io.ReadFull(rand.Reader, decoded)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"crypto/rand: %v\", err)\n\t\t\treturn nil\n\t\t}\n\t\tif nrd != n {\n\t\t\tt.Errorf(\"crypto/rand: bad read (%d bytes)\", nrd)\n\t\t\treturn nil\n\t\t}\n\t\tencoded, err = snappy.Encode(encoded[:cap(encoded)], decoded)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"snappy: %v\", err)\n\t\t\treturn nil\n\t\t}\n\t}\n\tif len(encoded) <= n {\n\t\tt.Error(errNotEnoughEntropy)\n\t\treturn nil\n\t}\n\n\treturn compressedChunk(t, decoded)\n}\n\n// compressedChunk encodes b returning a single, compressed, snappy-framed\n// block. compressedChunk can encode source data larger than allowed in the\n// specification.\nfunc compressedChunk(t *testing.T, src []byte) []byte {\n\tencoded, err := snappy.Encode(nil, src)\n\tif err != nil {\n\t\tt.Errorf(\"snappy: %v\", err)\n\t\treturn nil\n\t}\n\n\tif len(encoded) > (1<<24)-5 { // account for the 4-byte checksum\n\t\tt.Errorf(\"block data too large %d\", len(src))\n\t\treturn nil\n\t}\n\n\tchunk := make([]byte, len(encoded)+8)\n\twriteHeader(chunk[:8], blockCompressed, encoded, src)\n\tcopy(chunk[8:], encoded)\n\treturn chunk\n}\n\n// uncompressedChunk encodes b returning a single, uncompressed, snappy-framed\n// block. 
uncompressedChunk can encode chunks larger than allowed by the\n// specification.\nfunc uncompressedChunk(t *testing.T, src []byte) []byte {\n\tif len(src) > (1<<24)-5 { // account for the 4-byte checksum\n\t\tt.Errorf(\"block data too large %d\", len(src))\n\t\treturn nil\n\t}\n\n\tchunk := make([]byte, len(src)+8)\n\twriteHeader(chunk[:8], blockUncompressed, src, src)\n\tcopy(chunk[8:], src)\n\treturn chunk\n}\n\n// opaqueChunk returns an opaque b chunk (e.g. padding 0xfe) with length n\n// (total length, n+4 bytes).  practically useless but good enough for testing.\n// the first 4-bytes of data are random to ensure checksums are not being\n// verified.\nfunc opaqueChunk(b byte, n int) []byte {\n\tif b == 0 {\n\t\tb = 0xfe\n\t}\n\n\tlength := uint32(n)\n\tlengthle := []byte{byte(length), byte(length >> 8), byte(length >> 16)}\n\tchecksum := make([]byte, 4)\n\t_, err := rand.Read(checksum)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tpadbytes := make([]byte, n-4) // let this panic if n < 4\n\t_, err = rand.Read(padbytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar h []byte\n\th = append(h, b)\n\th = append(h, lengthle...)\n\th = append(h, checksum...)\n\th = append(h, padbytes...)\n\treturn h\n}\n\nfunc TestReaderWriteTo(t *testing.T) {\n\tvar encbuf bytes.Buffer\n\tvar decbuf bytes.Buffer\n\tmsg := \"hello copy interface\"\n\n\tw := NewWriter(&encbuf)\n\tn, err := io.WriteString(w, msg)\n\tif err != nil {\n\t\tt.Fatalf(\"encode: %v\", err)\n\t}\n\tif n != len(msg) {\n\t\tt.Fatalf(\"encode: %v\", err)\n\t}\n\n\tr := NewReader(&encbuf, true)\n\tn64, err := r.(*reader).WriteTo(&decbuf)\n\tif err != nil {\n\t\tt.Fatalf(\"decode: %v\", err)\n\t}\n\tif n64 != int64(len(msg)) {\n\t\tt.Fatalf(\"decode: decoded %d bytes %q\", n64, decbuf.Bytes())\n\t}\n\n\tdecmsg := decbuf.String()\n\tif decmsg != msg {\n\t\tt.Fatalf(\"decode: %q\", decmsg)\n\t}\n}\n\nfunc TestReaderWriteToPreviousError(t *testing.T) {\n\t// construct an io.Reader that returns an error on the first 
read and a\n\t// valid snappy-framed stream on subsequent reads.\n\tvar stream io.Reader\n\tstream = encodedString(\"hello\")\n\tstream = readErrorFirst(stream, fmt.Errorf(\"one time error\"))\n\tstream = NewReader(stream, true)\n\n\tvar buf bytes.Buffer\n\n\t// attempt the first read from the stream.\n\tn, err := stream.(*reader).WriteTo(&buf)\n\tif err == nil {\n\t\tt.Fatalf(\"error expected\")\n\t}\n\tif n != 0 {\n\t\tt.Fatalf(\"bytes written to buffer: %q\", buf.Bytes())\n\t}\n\n\t// attempt a second read from the stream.\n\tn, err = stream.(*reader).WriteTo(&buf)\n\tif err == nil {\n\t\tt.Fatalf(\"error expected\")\n\t}\n\tif n != 0 {\n\t\tt.Fatalf(\"bytes written to buffer: %q\", buf.Bytes())\n\t}\n}\n\n// readerErrorFirst is an io.Reader that returns an error on the first read.\n// readerErrorFirst is used to test that a reader does not attempt to read\n// after a read error occurs.\ntype readerErrorFirst struct {\n\tr     io.Reader\n\terr   error\n\tcount int\n}\n\nfunc readErrorFirst(r io.Reader, err error) io.Reader {\n\treturn &readerErrorFirst{\n\t\tr:   r,\n\t\terr: err,\n\t}\n}\n\nfunc (r *readerErrorFirst) Read(b []byte) (int, error) {\n\tr.count++\n\tif r.count == 1 {\n\t\treturn 0, r.err\n\t}\n\treturn r.r.Read(b)\n}\n\nfunc TestReaderWriteToWriteError(t *testing.T) {\n\torigmsg := \"hello\"\n\tstream := NewReader(encodedString(origmsg), true)\n\n\t// attempt to write the stream to an io.Writer that will not accept input.\n\tn, err := stream.(*reader).WriteTo(unwritable(fmt.Errorf(\"cannot write to this writer\")))\n\tif err == nil {\n\t\tt.Fatalf(\"error expected\")\n\t}\n\tif n != 0 {\n\t\tt.Fatalf(\"reported %d written to an unwritable writer\", n)\n\t}\n\n\t// the decoded message can still be read successfully because the encoded\n\t// stream was not corrupt/broken.\n\tvar buf bytes.Buffer\n\tn, err = stream.(*reader).WriteTo(&buf)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\tif n != int64(len(origmsg)) 
{\n\t\tt.Errorf(\"read %d bytes\", n)\n\t}\n\tif buf.String() != origmsg {\n\t\tt.Errorf(\"read %q\", buf)\n\t}\n}\n\n// writerUnwritable is an io.Writer that always returns an error.\ntype writerUnwritable struct {\n\terr error\n}\n\nfunc (w *writerUnwritable) Write([]byte) (int, error) {\n\treturn 0, w.err\n}\n\nfunc unwritable(err error) io.Writer {\n\treturn &writerUnwritable{err}\n}\n\nfunc encodedString(s string) io.Reader {\n\tvar buf bytes.Buffer\n\tw := NewWriter(&buf)\n\tio.WriteString(w, s)\n\treturn &buf\n}\n"
  },
  {
    "path": "vendor/github.com/mreiferson/go-snappystream/readwrite_test.go",
    "content": "package snappystream\n\nimport (\n\t\"bytes\"\n\t\"crypto/rand\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"testing\"\n)\n\nconst TestFileSize = 10 << 20 // 10MB\n\n// dummyBytesReader returns an io.Reader that avoids buffering optimizations\n// in io.Copy. This can be considered a 'worst-case' io.Reader as far as writer\n// frame alignment goes.\n//\n// Note: io.Copy uses a 32KB buffer internally as of Go 1.3, but that isn't\n// part of its public API (undocumented).\nfunc dummyBytesReader(p []byte) io.Reader {\n\treturn ioutil.NopCloser(bytes.NewReader(p))\n}\n\nfunc testWriteThenRead(t *testing.T, name string, bs []byte) {\n\tvar buf bytes.Buffer\n\tw := NewWriter(&buf)\n\tn, err := io.Copy(w, dummyBytesReader(bs))\n\tif err != nil {\n\t\tt.Errorf(\"write %v: %v\", name, err)\n\t\treturn\n\t}\n\tif n != int64(len(bs)) {\n\t\tt.Errorf(\"write %v: wrote %d bytes (!= %d)\", name, n, len(bs))\n\t\treturn\n\t}\n\n\tenclen := buf.Len()\n\n\tr := NewReader(&buf, true)\n\tgotbs, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\tt.Errorf(\"read %v: %v\", name, err)\n\t\treturn\n\t}\n\tn = int64(len(gotbs))\n\tif n != int64(len(bs)) {\n\t\tt.Errorf(\"read %v: read %d bytes (!= %d)\", name, n, len(bs))\n\t\treturn\n\t}\n\n\tif !bytes.Equal(gotbs, bs) {\n\t\tt.Errorf(\"%v: unequal decompressed content\", name)\n\t\treturn\n\t}\n\n\tc := float64(len(bs)) / float64(enclen)\n\tt.Logf(\"%v compression ratio %.03g (%d byte reduction)\", name, c, len(bs)-enclen)\n}\n\nfunc testBufferedWriteThenRead(t *testing.T, name string, bs []byte) {\n\tvar buf bytes.Buffer\n\tw := NewBufferedWriter(&buf)\n\tn, err := io.Copy(w, dummyBytesReader(bs))\n\tif err != nil {\n\t\tt.Errorf(\"write %v: %v\", name, err)\n\t\treturn\n\t}\n\tif n != int64(len(bs)) {\n\t\tt.Errorf(\"write %v: wrote %d bytes (!= %d)\", name, n, len(bs))\n\t\treturn\n\t}\n\terr = w.Close()\n\tif err != nil {\n\t\tt.Errorf(\"close %v: %v\", name, err)\n\t\treturn\n\t}\n\n\tenclen := buf.Len()\n\n\tr := NewReader(&buf, 
true)\n\tgotbs, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\tt.Errorf(\"read %v: %v\", name, err)\n\t\treturn\n\t}\n\tn = int64(len(gotbs))\n\tif n != int64(len(bs)) {\n\t\tt.Errorf(\"read %v: read %d bytes (!= %d)\", name, n, len(bs))\n\t\treturn\n\t}\n\n\tif !bytes.Equal(gotbs, bs) {\n\t\tt.Errorf(\"%v: unequal decompressed content\", name)\n\t\treturn\n\t}\n\n\tc := float64(len(bs)) / float64(enclen)\n\tt.Logf(\"%v compression ratio %.03g (%d byte reduction)\", name, c, len(bs)-enclen)\n}\n\nfunc TestWriterReader(t *testing.T) {\n\ttestWriteThenRead(t, \"simple\", []byte(\"test\"))\n\ttestWriteThenRead(t, \"manpage\", testDataMan)\n\ttestWriteThenRead(t, \"json\", testDataJSON)\n\n\tp := make([]byte, TestFileSize)\n\ttestWriteThenRead(t, \"constant\", p)\n\n\t_, err := rand.Read(p)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttestWriteThenRead(t, \"random\", p)\n\n}\n\nfunc TestBufferedWriterReader(t *testing.T) {\n\ttestBufferedWriteThenRead(t, \"simple\", []byte(\"test\"))\n\ttestBufferedWriteThenRead(t, \"manpage\", testDataMan)\n\ttestBufferedWriteThenRead(t, \"json\", testDataJSON)\n\n\tp := make([]byte, TestFileSize)\n\ttestBufferedWriteThenRead(t, \"constant\", p)\n\n\t_, err := rand.Read(p)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttestBufferedWriteThenRead(t, \"random\", p)\n\n}\n\nfunc TestWriterChunk(t *testing.T) {\n\tvar buf bytes.Buffer\n\n\tin := make([]byte, 128000)\n\n\tw := NewWriter(&buf)\n\tr := NewReader(&buf, VerifyChecksum)\n\n\tn, err := w.Write(in)\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\tif n != len(in) {\n\t\tt.Fatalf(\"wrote wrong amount %d != %d\", n, len(in))\n\t}\n\n\tout := make([]byte, len(in))\n\tn, err = io.ReadFull(r, out)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif n != len(in) {\n\t\tt.Fatalf(\"read wrong amount %d != %d\", n, len(in))\n\t}\n\n\tif !bytes.Equal(out, in) {\n\t\tt.Fatalf(\"bytes not equal %v != %v\", out, in)\n\t}\n}\n\nfunc BenchmarkWriterManpage(b *testing.B) 
{\n\tbenchmarkWriterBytes(b, testDataMan)\n}\nfunc BenchmarkBufferedWriterManpage(b *testing.B) {\n\tbenchmarkBufferedWriterBytes(b, testDataMan)\n}\nfunc BenchmarkBufferedWriterManpageNoCopy(b *testing.B) {\n\tbenchmarkBufferedWriterBytesNoCopy(b, testDataMan)\n}\n\nfunc BenchmarkWriterJSON(b *testing.B) {\n\tbenchmarkWriterBytes(b, testDataJSON)\n}\nfunc BenchmarkBufferedWriterJSON(b *testing.B) {\n\tbenchmarkBufferedWriterBytes(b, testDataJSON)\n}\nfunc BenchmarkBufferedWriterJSONNoCopy(b *testing.B) {\n\tbenchmarkBufferedWriterBytesNoCopy(b, testDataJSON)\n}\n\n// BenchmarkWriterRandom tests performance encoding effectively uncompressable\n// data.\nfunc BenchmarkWriterRandom(b *testing.B) {\n\tbenchmarkWriterBytes(b, randBytes(b, TestFileSize))\n}\nfunc BenchmarkBufferedWriterRandom(b *testing.B) {\n\tbenchmarkBufferedWriterBytes(b, randBytes(b, TestFileSize))\n}\nfunc BenchmarkBufferedWriterRandomNoCopy(b *testing.B) {\n\tbenchmarkBufferedWriterBytesNoCopy(b, randBytes(b, TestFileSize))\n}\n\n// BenchmarkWriterConstant tests performance encoding maximally compressible\n// data.\nfunc BenchmarkWriterConstant(b *testing.B) {\n\tbenchmarkWriterBytes(b, make([]byte, TestFileSize))\n}\nfunc BenchmarkBufferedWriterConstant(b *testing.B) {\n\tbenchmarkBufferedWriterBytes(b, make([]byte, TestFileSize))\n}\nfunc BenchmarkBufferedWriterConstantNoCopy(b *testing.B) {\n\tbenchmarkBufferedWriterBytesNoCopy(b, make([]byte, TestFileSize))\n}\n\nfunc benchmarkWriterBytes(b *testing.B, p []byte) {\n\tenc := func() io.WriteCloser {\n\t\t// wrap the normal writer so that it has a noop Close method.  
writer\n\t\t// does not implement ReaderFrom so this does not impact performance.\n\t\treturn &nopWriteCloser{NewWriter(ioutil.Discard)}\n\t}\n\tbenchmarkEncode(b, enc, p)\n}\nfunc benchmarkBufferedWriterBytes(b *testing.B, p []byte) {\n\tenc := func() io.WriteCloser {\n\t\t// the writer's ReaderFrom implemention will be used in the benchmark.\n\t\treturn NewBufferedWriter(ioutil.Discard)\n\t}\n\tbenchmarkEncode(b, enc, p)\n}\nfunc benchmarkBufferedWriterBytesNoCopy(b *testing.B, p []byte) {\n\tenc := func() io.WriteCloser {\n\t\t// the writer is wrapped as to hide it's ReaderFrom implemention.\n\t\treturn &writeCloserNoCopy{NewBufferedWriter(ioutil.Discard)}\n\t}\n\tbenchmarkEncode(b, enc, p)\n}\n\n// benchmarkEncode benchmarks the speed at which bytes can be copied from\n// bs into writers created by enc.\nfunc benchmarkEncode(b *testing.B, enc func() io.WriteCloser, bs []byte) {\n\tsize := int64(len(bs))\n\tb.SetBytes(size)\n\tb.StartTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tw := enc()\n\t\tn, err := io.Copy(w, dummyBytesReader(bs))\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tif n != size {\n\t\t\tb.Fatalf(\"wrote wrong amount %d != %d\", n, size)\n\t\t}\n\t\terr = w.Close()\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"close: %v\", err)\n\t\t}\n\t}\n\tb.StopTimer()\n}\n\nfunc BenchmarkReaderManpage(b *testing.B) {\n\tencodeAndBenchmarkReader(b, testDataMan)\n}\nfunc BenchmarkReaderManpage_buffered(b *testing.B) {\n\tencodeAndBenchmarkReader_buffered(b, testDataMan)\n}\nfunc BenchmarkReaderManpageNoCopy(b *testing.B) {\n\tencodeAndBenchmarkReaderNoCopy(b, testDataMan)\n}\n\nfunc BenchmarkReaderJSON(b *testing.B) {\n\tencodeAndBenchmarkReader(b, testDataJSON)\n}\nfunc BenchmarkReaderJSON_buffered(b *testing.B) {\n\tencodeAndBenchmarkReader_buffered(b, testDataJSON)\n}\nfunc BenchmarkReaderJSONNoCopy(b *testing.B) {\n\tencodeAndBenchmarkReaderNoCopy(b, testDataJSON)\n}\n\n// BenchmarkReaderRandom tests decoding of effectively uncompressable data.\nfunc 
BenchmarkReaderRandom(b *testing.B) {\n\tencodeAndBenchmarkReader(b, randBytes(b, TestFileSize))\n}\nfunc BenchmarkReaderRandom_buffered(b *testing.B) {\n\tencodeAndBenchmarkReader_buffered(b, randBytes(b, TestFileSize))\n}\nfunc BenchmarkReaderRandomNoCopy(b *testing.B) {\n\tencodeAndBenchmarkReaderNoCopy(b, randBytes(b, TestFileSize))\n}\n\n// BenchmarkReaderConstant tests decoding of maximally compressible data.\nfunc BenchmarkReaderConstant(b *testing.B) {\n\tencodeAndBenchmarkReader(b, make([]byte, TestFileSize))\n}\nfunc BenchmarkReaderConstant_buffered(b *testing.B) {\n\tencodeAndBenchmarkReader_buffered(b, make([]byte, TestFileSize))\n}\nfunc BenchmarkReaderConstantNoCopy(b *testing.B) {\n\tencodeAndBenchmarkReaderNoCopy(b, make([]byte, TestFileSize))\n}\n\n// encodeAndBenchmarkReader is a helper that benchmarks the package\n// reader's performance given p encoded as a snappy framed stream.\n//\n// encodeAndBenchmarkReader benchmarks decoding of streams containing\n// (multiple) short frames.\nfunc encodeAndBenchmarkReader(b *testing.B, p []byte) {\n\tenc, err := encodeStreamBytes(p, false)\n\tif err != nil {\n\t\tb.Fatalf(\"pre-benchmark compression: %v\", err)\n\t}\n\tdec := func(r io.Reader) io.Reader {\n\t\treturn NewReader(r, VerifyChecksum)\n\t}\n\tbenchmarkDecode(b, dec, int64(len(p)), enc)\n}\n\n// encodeAndBenchmarkReader_buffered is a helper that benchmarks the\n// package reader's performance given p encoded as a snappy framed stream.\n//\n// encodeAndBenchmarkReader_buffered benchmarks decoding of streams that\n// contain at most one short frame (at the end).\nfunc encodeAndBenchmarkReader_buffered(b *testing.B, p []byte) {\n\tenc, err := encodeStreamBytes(p, true)\n\tif err != nil {\n\t\tb.Fatalf(\"pre-benchmark compression: %v\", err)\n\t}\n\tdec := func(r io.Reader) io.Reader {\n\t\treturn NewReader(r, VerifyChecksum)\n\t}\n\tbenchmarkDecode(b, dec, int64(len(p)), enc)\n}\n\n// encodeAndBenchmarkReaderNoCopy is a helper that benchmarks 
the\n// package reader's performance given p encoded as a snappy framed stream.\n// encodeAndBenchmarReaderNoCopy avoids use of the reader's io.WriterTo\n// interface.\n//\n// encodeAndBenchmarkReaderNoCopy benchmarks decoding of streams that\n// contain at most one short frame (at the end).\nfunc encodeAndBenchmarkReaderNoCopy(b *testing.B, p []byte) {\n\tenc, err := encodeStreamBytes(p, true)\n\tif err != nil {\n\t\tb.Fatalf(\"pre-benchmark compression: %v\", err)\n\t}\n\tdec := func(r io.Reader) io.Reader {\n\t\treturn ioutil.NopCloser(NewReader(r, VerifyChecksum))\n\t}\n\tbenchmarkDecode(b, dec, int64(len(p)), enc)\n}\n\n// benchmarkDecode runs a benchmark that repeatedly decoded snappy\n// framed bytes enc.  The length of the decoded result in each iteration must\n// equal size.\nfunc benchmarkDecode(b *testing.B, dec func(io.Reader) io.Reader, size int64, enc []byte) {\n\tb.SetBytes(int64(len(enc))) // BUG this is probably wrong\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tr := dec(bytes.NewReader(enc))\n\t\tn, err := io.Copy(ioutil.Discard, r)\n\t\tif err != nil {\n\t\t\tb.Fatalf(err.Error())\n\t\t}\n\t\tif n != size {\n\t\t\tb.Fatalf(\"read wrong amount %d != %d\", n, size)\n\t\t}\n\t}\n\tb.StopTimer()\n}\n\n// encodeStreamBytes is like encodeStream but operates on a byte slice.\n// encodeStreamBytes ensures that long streams are not maximally compressed if\n// buffer is false.\nfunc encodeStreamBytes(b []byte, buffer bool) ([]byte, error) {\n\treturn encodeStream(dummyBytesReader(b), buffer)\n}\n\n// encodeStream encodes data read from r as a snappy framed stream and returns\n// the result as a byte slice.  
if buffer is true the bytes from r are buffered\n// to improve the resulting slice's compression ratio.\nfunc encodeStream(r io.Reader, buffer bool) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tif !buffer {\n\t\tw := NewWriter(&buf)\n\t\t_, err := io.Copy(w, r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn buf.Bytes(), nil\n\t}\n\n\tw := NewBufferedWriter(&buf)\n\t_, err := io.Copy(w, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = w.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}\n\n// randBytes reads size bytes from the computer's cryptographic random source.\n// the resulting bytes have approximately maximal entropy and are effectively\n// uncompressible with any algorithm.\nfunc randBytes(b *testing.B, size int) []byte {\n\trandp := make([]byte, size)\n\t_, err := io.ReadFull(rand.Reader, randp)\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\treturn randp\n}\n\n// writeCloserNoCopy is an io.WriteCloser that simply wraps another\n// io.WriteCloser.  This is useful for masking implementations for interfaces\n// like ReaderFrom which may be opted into use inside functions like io.Copy.\ntype writeCloserNoCopy struct {\n\tio.WriteCloser\n}\n\n// nopWriteCloser is an io.WriteCloser that has a noop Close method.  This type\n// has the effect of masking the underlying writer's Close implementation if it\n// has one, or satisfying interface implementations for writers that do not\n// need to be closing.\ntype nopWriteCloser struct {\n\tio.Writer\n}\n\nfunc (w *nopWriteCloser) Close() error {\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/mreiferson/go-snappystream/snappystream.go",
    "content": "// snappystream wraps snappy-go and supplies a Reader and Writer\n// for the snappy framed stream format:\n//     https://snappy.googlecode.com/svn/trunk/framing_format.txt\npackage snappystream\n\nimport (\n\t\"hash/crc32\"\n\n\t\"code.google.com/p/snappy-go/snappy\"\n)\n\n// Ext is the file extension for files whose content is a snappy framed stream.\nconst Ext = \".sz\"\n\n// MediaType is the MIME type used to represent snappy framed content.\nconst MediaType = \"application/x-snappy-framed\"\n\n// ContentEncoding is the appropriate HTTP Content-Encoding header value for\n// requests containing a snappy framed entity body.\nconst ContentEncoding = \"x-snappy-framed\"\n\n// MaxBlockSize is the maximum number of decoded bytes allowed to be\n// represented in a snappy framed block (sections 4.2 and 4.3).\nconst MaxBlockSize = 65536\n\n// maxEncodedBlockSize is the maximum number of encoded bytes in a framed\n// block.\nvar maxEncodedBlockSize = uint32(snappy.MaxEncodedLen(MaxBlockSize))\n\nconst VerifyChecksum = true\nconst SkipVerifyChecksum = false\n\n// Block types defined by the snappy framed format specification.\nconst (\n\tblockCompressed       = 0x00\n\tblockUncompressed     = 0x01\n\tblockPadding          = 0xfe\n\tblockStreamIdentifier = 0xff\n)\n\n// streamID is the stream identifier block that begins a valid snappy framed\n// stream.\nvar streamID = []byte{0xff, 0x06, 0x00, 0x00, 0x73, 0x4e, 0x61, 0x50, 0x70, 0x59}\n\n// maskChecksum implements the checksum masking algorithm described by the spec.\nfunc maskChecksum(c uint32) uint32 {\n\treturn ((c >> 15) | (c << 17)) + 0xa282ead8\n}\n\nvar crcTable *crc32.Table\n\nfunc init() {\n\tcrcTable = crc32.MakeTable(crc32.Castagnoli)\n}\n"
  },
  {
    "path": "vendor/github.com/mreiferson/go-snappystream/writer.go",
    "content": "package snappystream\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"hash/crc32\"\n\t\"io\"\n\n\t\"code.google.com/p/snappy-go/snappy\"\n)\n\nvar errClosed = fmt.Errorf(\"closed\")\n\n// BufferedWriter is an io.WriteCloser with behavior similar to writers\n// returned by NewWriter but it buffers written data, maximizing block size (to\n// improve the output compression ratio) at the cost of speed. Benefits over\n// NewWriter are most noticible when individual writes are small and when\n// streams are long.\n//\n// Failure to call a BufferedWriter's Close or Flush methods after it is done\n// being written to will likely result in missing data frames which will be\n// undetectable in the decoding process.\n//\n// NOTE: BufferedWriter cannot be instantiated via struct literal and must\n// use NewBufferedWriter (i.e. its zero value is not usable).\ntype BufferedWriter struct {\n\terr error\n\tw   *writer\n\tbw  *bufio.Writer\n}\n\n// NewBufferedWriter allocates and returns a BufferedWriter with an internal\n// buffer of MaxBlockSize bytes.  If an error occurs writing a block to w, all\n// future writes will fail with the same error.  After all data has been\n// written, the client should call the Flush method to guarantee all data has\n// been forwarded to the underlying io.Writer.\nfunc NewBufferedWriter(w io.Writer) *BufferedWriter {\n\t_w := NewWriter(w).(*writer)\n\treturn &BufferedWriter{\n\t\tw:  _w,\n\t\tbw: bufio.NewWriterSize(_w, MaxBlockSize),\n\t}\n}\n\n// ReadFrom implements the io.ReaderFrom interface used by io.Copy. It encodes\n// data read from r as a snappy framed stream that is written to the underlying\n// writer.  
ReadFrom returns the number number of bytes read, along with any\n// error encountered (other than io.EOF).\nfunc (w *BufferedWriter) ReadFrom(r io.Reader) (int64, error) {\n\tif w.err != nil {\n\t\treturn 0, w.err\n\t}\n\n\tvar n int64\n\tn, w.err = w.bw.ReadFrom(r)\n\treturn n, w.err\n}\n\n// Write buffers p internally, encoding and writing a block to the underlying\n// buffer if the buffer grows beyond MaxBlockSize bytes.  The returned int\n// will be 0 if there was an error and len(p) otherwise.\nfunc (w *BufferedWriter) Write(p []byte) (int, error) {\n\tif w.err != nil {\n\t\treturn 0, w.err\n\t}\n\n\t_, w.err = w.bw.Write(p)\n\tif w.err != nil {\n\t\treturn 0, w.err\n\t}\n\n\treturn len(p), nil\n}\n\n// Flush encodes and writes a block with the contents of w's internal buffer to\n// the underlying writer even if the buffer does not contain a full block of\n// data (MaxBlockSize bytes).\nfunc (w *BufferedWriter) Flush() error {\n\tif w.err == nil {\n\t\tw.err = w.bw.Flush()\n\t}\n\n\treturn w.err\n}\n\n// Close flushes w's internal buffer and tears down internal data structures.\n// After a successful call to Close method calls on w return an error.  Close\n// makes no attempt to close the underlying writer.\nfunc (w *BufferedWriter) Close() error {\n\tif w.err != nil {\n\t\treturn w.err\n\t}\n\n\tw.err = w.bw.Flush()\n\tw.w = nil\n\tw.bw = nil\n\n\tif w.err != nil {\n\t\treturn w.err\n\t}\n\n\tw.err = errClosed\n\treturn nil\n}\n\ntype writer struct {\n\twriter io.Writer\n\terr    error\n\n\thdr []byte\n\tdst []byte\n\n\tsentStreamID bool\n}\n\n// NewWriter returns an io.Writer that writes its input to an underlying\n// io.Writer encoded as a snappy framed stream.  A stream identifier block is\n// written to w preceding the first data block.  
The returned writer will never\n// emit a block with length in bytes greater than MaxBlockSize+4 nor one\n// containing more than MaxBlockSize bytes of (uncompressed) data.\n//\n// For each Write, the returned length will only ever be len(p) or 0,\n// regardless of the length of *compressed* bytes written to the wrapped\n// io.Writer.  If the returned length is 0 then error will be non-nil.  If\n// len(p) exceeds 65536, the slice will be automatically chunked into smaller\n// blocks which are all emitted before the call returns.\nfunc NewWriter(w io.Writer) io.Writer {\n\treturn &writer{\n\t\twriter: w,\n\n\t\thdr: make([]byte, 8),\n\t\tdst: make([]byte, 4096),\n\t}\n}\n\nfunc (w *writer) Write(p []byte) (int, error) {\n\tif w.err != nil {\n\t\treturn 0, w.err\n\t}\n\n\ttotal := 0\n\tsz := MaxBlockSize\n\tvar n int\n\tfor i := 0; i < len(p); i += n {\n\t\tif i+sz > len(p) {\n\t\t\tsz = len(p) - i\n\t\t}\n\n\t\tn, w.err = w.write(p[i : i+sz])\n\t\tif w.err != nil {\n\t\t\treturn 0, w.err\n\t\t}\n\t\ttotal += n\n\t}\n\treturn total, nil\n}\n\n// write attempts to encode p as a block and write it to the underlying writer.\n// The returned int may not equal p's length if compression below\n// MaxBlockSize-4 could not be achieved.\nfunc (w *writer) write(p []byte) (int, error) {\n\tvar err error\n\n\tif len(p) > MaxBlockSize {\n\t\treturn 0, errors.New(fmt.Sprintf(\"block too large %d > %d\", len(p), MaxBlockSize))\n\t}\n\n\tw.dst = w.dst[:cap(w.dst)] // Encode does dumb resize w/o context. reslice avoids alloc.\n\tw.dst, err = snappy.Encode(w.dst, p)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tblock := w.dst\n\tn := len(p)\n\tcompressed := true\n\n\t// check for data which is better left uncompressed.  
this is determined if\n\t// the encoded content is longer than the source.\n\tif len(w.dst) >= len(p) {\n\t\tcompressed = false\n\t\tblock = p[:n]\n\t}\n\n\tif !w.sentStreamID {\n\t\t_, err := w.writer.Write(streamID)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tw.sentStreamID = true\n\t}\n\n\t// set the block type\n\tif compressed {\n\t\twriteHeader(w.hdr, blockCompressed, block, p[:n])\n\t} else {\n\t\twriteHeader(w.hdr, blockUncompressed, block, p[:n])\n\t}\n\n\t_, err = w.writer.Write(w.hdr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t_, err = w.writer.Write(block)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn n, nil\n}\n\n// writeHeader panics if len(hdr) is less than 8.\nfunc writeHeader(hdr []byte, btype byte, enc, dec []byte) {\n\thdr[0] = btype\n\n\t// 3 byte little endian length of encoded content\n\tlength := uint32(len(enc)) + 4 // +4 for checksum\n\thdr[1] = byte(length)\n\thdr[2] = byte(length >> 8)\n\thdr[3] = byte(length >> 16)\n\n\t// 4 byte little endian CRC32 checksum of decoded content\n\tchecksum := maskChecksum(crc32.Checksum(dec, crcTable))\n\thdr[4] = byte(checksum)\n\thdr[5] = byte(checksum >> 8)\n\thdr[6] = byte(checksum >> 16)\n\thdr[7] = byte(checksum >> 24)\n}\n"
  },
  {
    "path": "vendor/github.com/mreiferson/go-snappystream/writer_test.go",
    "content": "package snappystream\n\nimport (\n\t\"bytes\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"testing\"\n)\n\n// This test ensures that all BufferedWriter methods fail after Close has been\n// called.\nfunc TestBufferedWriterClose(t *testing.T) {\n\tw := NewBufferedWriter(ioutil.Discard)\n\terr := w.Close()\n\tif err != nil {\n\t\tlog.Fatalf(\"closing empty BufferedWriter: %v\", err)\n\t}\n\terr = w.Close()\n\tif err == nil {\n\t\tlog.Fatalf(\"successful close after close\")\n\t}\n\terr = w.Flush()\n\tif err == nil {\n\t\tlog.Fatalf(\"successful flush after close\")\n\t}\n\t_, err = w.Write([]byte(\"abc\"))\n\tif err == nil {\n\t\tlog.Fatalf(\"successful write after close\")\n\t}\n}\n\n// This test simply checks that buffering has an effect in a situation where it\n// is know it should.\nfunc TestBufferedWriter_compression(t *testing.T) {\n\tp := []byte(\"hello snappystream!\")\n\tn := 10\n\n\tvar shortbuf bytes.Buffer\n\tw := NewWriter(&shortbuf)\n\tfor i := 0; i < n; i++ {\n\t\tn, err := w.Write(p)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"writer error: %v\", err)\n\t\t}\n\t\tif n != len(p) {\n\t\t\tt.Fatalf(\"short write: %d\", n)\n\t\t}\n\t}\n\n\tvar buf bytes.Buffer\n\tbw := NewBufferedWriter(&buf)\n\tfor i := 0; i < n; i++ {\n\t\tn, err := bw.Write(p)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"buffered writer error: %v\", err)\n\t\t}\n\t\tif n != len(p) {\n\t\t\tt.Fatalf(\"short write: %d\", n)\n\t\t}\n\t}\n\terr := bw.Close()\n\tif err != nil {\n\t\tt.Fatalf(\"closing buffer: %v\", err)\n\t}\n\n\tuncompressed := int64(n) * int64(len(p))\n\tcompressed := shortbuf.Len()\n\tbufcompressed := buf.Len()\n\n\tif compressed <= bufcompressed {\n\t\tt.Fatalf(\"no benefit from buffering (%d <= %d)\", shortbuf.Len(), buf.Len())\n\t}\n\n\tc := float64(uncompressed) / float64(compressed)\n\tbufc := float64(uncompressed) / float64(bufcompressed)\n\timproved := bufc / c\n\n\tt.Logf(\"BufferedWriter compression ratio %g (%.03g factor improvement over %g)\", bufc, improved, 
c)\n}\n\n// This tests ensures flushing after every write is equivalent to using\n// NewWriter directly.\nfunc TestBufferedWriterFlush(t *testing.T) {\n\tp := []byte(\"hello snappystream!\")\n\tn := 10\n\n\tvar shortbuf bytes.Buffer\n\tw := NewWriter(&shortbuf)\n\tfor i := 0; i < n; i++ {\n\t\tn, err := w.Write(p)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"writer error: %v\", err)\n\t\t}\n\t\tif n != len(p) {\n\t\t\tt.Fatalf(\"short write: %d\", n)\n\t\t}\n\t}\n\n\tvar buf bytes.Buffer\n\tbw := NewBufferedWriter(&buf)\n\tfor i := 0; i < n; i++ {\n\t\tn, err := bw.Write(p)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"buffered writer error: %v\", err)\n\t\t}\n\t\tif n != len(p) {\n\t\t\tt.Fatalf(\"short write: %d\", n)\n\t\t}\n\t\terr = bw.Flush()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"flush: %v\", err)\n\t\t}\n\t}\n\terr := bw.Close()\n\tif err != nil {\n\t\tt.Fatalf(\"closing buffer: %v\", err)\n\t}\n\n\tif shortbuf.Len() != buf.Len() {\n\t\tt.Fatalf(\"unexpected size: %d != %d\", shortbuf.Len(), buf.Len())\n\t}\n\n\tif !bytes.Equal(shortbuf.Bytes(), buf.Bytes()) {\n\t\tt.Fatalf(\"unexpected bytes\")\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/context.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package context defines the Context type, which carries deadlines,\n// cancelation signals, and other request-scoped values across API boundaries\n// and between processes.\n//\n// Incoming requests to a server should create a Context, and outgoing calls to\n// servers should accept a Context.  The chain of function calls between must\n// propagate the Context, optionally replacing it with a modified copy created\n// using WithDeadline, WithTimeout, WithCancel, or WithValue.\n//\n// Programs that use Contexts should follow these rules to keep interfaces\n// consistent across packages and enable static analysis tools to check context\n// propagation:\n//\n// Do not store Contexts inside a struct type; instead, pass a Context\n// explicitly to each function that needs it.  The Context should be the first\n// parameter, typically named ctx:\n//\n// \tfunc DoSomething(ctx context.Context, arg Arg) error {\n// \t\t// ... use ctx ...\n// \t}\n//\n// Do not pass a nil Context, even if a function permits it.  
Pass context.TODO\n// if you are unsure about which Context to use.\n//\n// Use context Values only for request-scoped data that transits processes and\n// APIs, not for passing optional parameters to functions.\n//\n// The same Context may be passed to functions running in different goroutines;\n// Contexts are safe for simultaneous use by multiple goroutines.\n//\n// See http://blog.golang.org/context for example code for a server that uses\n// Contexts.\npackage context\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n)\n\n// A Context carries a deadline, a cancelation signal, and other values across\n// API boundaries.\n//\n// Context's methods may be called by multiple goroutines simultaneously.\ntype Context interface {\n\t// Deadline returns the time when work done on behalf of this context\n\t// should be canceled.  Deadline returns ok==false when no deadline is\n\t// set.  Successive calls to Deadline return the same results.\n\tDeadline() (deadline time.Time, ok bool)\n\n\t// Done returns a channel that's closed when work done on behalf of this\n\t// context should be canceled.  Done may return nil if this context can\n\t// never be canceled.  
Successive calls to Done return the same value.\n\t//\n\t// WithCancel arranges for Done to be closed when cancel is called;\n\t// WithDeadline arranges for Done to be closed when the deadline\n\t// expires; WithTimeout arranges for Done to be closed when the timeout\n\t// elapses.\n\t//\n\t// Done is provided for use in select statements:\n\t//\n\t//  // Stream generates values with DoSomething and sends them to out\n\t//  // until DoSomething returns an error or ctx.Done is closed.\n\t//  func Stream(ctx context.Context, out <-chan Value) error {\n\t//  \tfor {\n\t//  \t\tv, err := DoSomething(ctx)\n\t//  \t\tif err != nil {\n\t//  \t\t\treturn err\n\t//  \t\t}\n\t//  \t\tselect {\n\t//  \t\tcase <-ctx.Done():\n\t//  \t\t\treturn ctx.Err()\n\t//  \t\tcase out <- v:\n\t//  \t\t}\n\t//  \t}\n\t//  }\n\t//\n\t// See http://blog.golang.org/pipelines for more examples of how to use\n\t// a Done channel for cancelation.\n\tDone() <-chan struct{}\n\n\t// Err returns a non-nil error value after Done is closed.  Err returns\n\t// Canceled if the context was canceled or DeadlineExceeded if the\n\t// context's deadline passed.  No other values for Err are defined.\n\t// After Done is closed, successive calls to Err return the same value.\n\tErr() error\n\n\t// Value returns the value associated with this context for key, or nil\n\t// if no value is associated with key.  Successive calls to Value with\n\t// the same key returns the same result.\n\t//\n\t// Use context values only for request-scoped data that transits\n\t// processes and API boundaries, not for passing optional parameters to\n\t// functions.\n\t//\n\t// A key identifies a specific value in a Context.  Functions that wish\n\t// to store values in Context typically allocate a key in a global\n\t// variable then use that key as the argument to context.WithValue and\n\t// Context.Value.  
A key can be any type that supports equality;\n\t// packages should define keys as an unexported type to avoid\n\t// collisions.\n\t//\n\t// Packages that define a Context key should provide type-safe accessors\n\t// for the values stores using that key:\n\t//\n\t// \t// Package user defines a User type that's stored in Contexts.\n\t// \tpackage user\n\t//\n\t// \timport \"golang.org/x/net/context\"\n\t//\n\t// \t// User is the type of value stored in the Contexts.\n\t// \ttype User struct {...}\n\t//\n\t// \t// key is an unexported type for keys defined in this package.\n\t// \t// This prevents collisions with keys defined in other packages.\n\t// \ttype key int\n\t//\n\t// \t// userKey is the key for user.User values in Contexts.  It is\n\t// \t// unexported; clients use user.NewContext and user.FromContext\n\t// \t// instead of using this key directly.\n\t// \tvar userKey key = 0\n\t//\n\t// \t// NewContext returns a new Context that carries value u.\n\t// \tfunc NewContext(ctx context.Context, u *User) context.Context {\n\t// \t\treturn context.WithValue(ctx, userKey, u)\n\t// \t}\n\t//\n\t// \t// FromContext returns the User value stored in ctx, if any.\n\t// \tfunc FromContext(ctx context.Context) (*User, bool) {\n\t// \t\tu, ok := ctx.Value(userKey).(*User)\n\t// \t\treturn u, ok\n\t// \t}\n\tValue(key interface{}) interface{}\n}\n\n// Canceled is the error returned by Context.Err when the context is canceled.\nvar Canceled = errors.New(\"context canceled\")\n\n// DeadlineExceeded is the error returned by Context.Err when the context's\n// deadline passes.\nvar DeadlineExceeded = errors.New(\"context deadline exceeded\")\n\n// An emptyCtx is never canceled, has no values, and has no deadline.  
It is not\n// struct{}, since vars of this type must have distinct addresses.\ntype emptyCtx int\n\nfunc (*emptyCtx) Deadline() (deadline time.Time, ok bool) {\n\treturn\n}\n\nfunc (*emptyCtx) Done() <-chan struct{} {\n\treturn nil\n}\n\nfunc (*emptyCtx) Err() error {\n\treturn nil\n}\n\nfunc (*emptyCtx) Value(key interface{}) interface{} {\n\treturn nil\n}\n\nfunc (e *emptyCtx) String() string {\n\tswitch e {\n\tcase background:\n\t\treturn \"context.Background\"\n\tcase todo:\n\t\treturn \"context.TODO\"\n\t}\n\treturn \"unknown empty Context\"\n}\n\nvar (\n\tbackground = new(emptyCtx)\n\ttodo       = new(emptyCtx)\n)\n\n// Background returns a non-nil, empty Context. It is never canceled, has no\n// values, and has no deadline.  It is typically used by the main function,\n// initialization, and tests, and as the top-level Context for incoming\n// requests.\nfunc Background() Context {\n\treturn background\n}\n\n// TODO returns a non-nil, empty Context.  Code should use context.TODO when\n// it's unclear which Context to use or it's is not yet available (because the\n// surrounding function has not yet been extended to accept a Context\n// parameter).  TODO is recognized by static analysis tools that determine\n// whether Contexts are propagated correctly in a program.\nfunc TODO() Context {\n\treturn todo\n}\n\n// A CancelFunc tells an operation to abandon its work.\n// A CancelFunc does not wait for the work to stop.\n// After the first call, subsequent calls to a CancelFunc do nothing.\ntype CancelFunc func()\n\n// WithCancel returns a copy of parent with a new Done channel. 
The returned\n// context's Done channel is closed when the returned cancel function is called\n// or when the parent context's Done channel is closed, whichever happens first.\n//\n// Canceling this context releases resources associated with it, so code should\n// call cancel as soon as the operations running in this Context complete.\nfunc WithCancel(parent Context) (ctx Context, cancel CancelFunc) {\n\tc := newCancelCtx(parent)\n\tpropagateCancel(parent, &c)\n\treturn &c, func() { c.cancel(true, Canceled) }\n}\n\n// newCancelCtx returns an initialized cancelCtx.\nfunc newCancelCtx(parent Context) cancelCtx {\n\treturn cancelCtx{\n\t\tContext: parent,\n\t\tdone:    make(chan struct{}),\n\t}\n}\n\n// propagateCancel arranges for child to be canceled when parent is.\nfunc propagateCancel(parent Context, child canceler) {\n\tif parent.Done() == nil {\n\t\treturn // parent is never canceled\n\t}\n\tif p, ok := parentCancelCtx(parent); ok {\n\t\tp.mu.Lock()\n\t\tif p.err != nil {\n\t\t\t// parent has already been canceled\n\t\t\tchild.cancel(false, p.err)\n\t\t} else {\n\t\t\tif p.children == nil {\n\t\t\t\tp.children = make(map[canceler]bool)\n\t\t\t}\n\t\t\tp.children[child] = true\n\t\t}\n\t\tp.mu.Unlock()\n\t} else {\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-parent.Done():\n\t\t\t\tchild.cancel(false, parent.Err())\n\t\t\tcase <-child.Done():\n\t\t\t}\n\t\t}()\n\t}\n}\n\n// parentCancelCtx follows a chain of parent references until it finds a\n// *cancelCtx.  
This function understands how each of the concrete types in this\n// package represents its parent.\nfunc parentCancelCtx(parent Context) (*cancelCtx, bool) {\n\tfor {\n\t\tswitch c := parent.(type) {\n\t\tcase *cancelCtx:\n\t\t\treturn c, true\n\t\tcase *timerCtx:\n\t\t\treturn &c.cancelCtx, true\n\t\tcase *valueCtx:\n\t\t\tparent = c.Context\n\t\tdefault:\n\t\t\treturn nil, false\n\t\t}\n\t}\n}\n\n// removeChild removes a context from its parent.\nfunc removeChild(parent Context, child canceler) {\n\tp, ok := parentCancelCtx(parent)\n\tif !ok {\n\t\treturn\n\t}\n\tp.mu.Lock()\n\tif p.children != nil {\n\t\tdelete(p.children, child)\n\t}\n\tp.mu.Unlock()\n}\n\n// A canceler is a context type that can be canceled directly.  The\n// implementations are *cancelCtx and *timerCtx.\ntype canceler interface {\n\tcancel(removeFromParent bool, err error)\n\tDone() <-chan struct{}\n}\n\n// A cancelCtx can be canceled.  When canceled, it also cancels any children\n// that implement canceler.\ntype cancelCtx struct {\n\tContext\n\n\tdone chan struct{} // closed by the first cancel call.\n\n\tmu       sync.Mutex\n\tchildren map[canceler]bool // set to nil by the first cancel call\n\terr      error             // set to non-nil by the first cancel call\n}\n\nfunc (c *cancelCtx) Done() <-chan struct{} {\n\treturn c.done\n}\n\nfunc (c *cancelCtx) Err() error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\treturn c.err\n}\n\nfunc (c *cancelCtx) String() string {\n\treturn fmt.Sprintf(\"%v.WithCancel\", c.Context)\n}\n\n// cancel closes c.done, cancels each of c's children, and, if\n// removeFromParent is true, removes c from its parent's children.\nfunc (c *cancelCtx) cancel(removeFromParent bool, err error) {\n\tif err == nil {\n\t\tpanic(\"context: internal error: missing cancel error\")\n\t}\n\tc.mu.Lock()\n\tif c.err != nil {\n\t\tc.mu.Unlock()\n\t\treturn // already canceled\n\t}\n\tc.err = err\n\tclose(c.done)\n\tfor child := range c.children {\n\t\t// NOTE: acquiring the child's 
lock while holding parent's lock.\n\t\tchild.cancel(false, err)\n\t}\n\tc.children = nil\n\tc.mu.Unlock()\n\n\tif removeFromParent {\n\t\tremoveChild(c.Context, c)\n\t}\n}\n\n// WithDeadline returns a copy of the parent context with the deadline adjusted\n// to be no later than d.  If the parent's deadline is already earlier than d,\n// WithDeadline(parent, d) is semantically equivalent to parent.  The returned\n// context's Done channel is closed when the deadline expires, when the returned\n// cancel function is called, or when the parent context's Done channel is\n// closed, whichever happens first.\n//\n// Canceling this context releases resources associated with it, so code should\n// call cancel as soon as the operations running in this Context complete.\nfunc WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {\n\tif cur, ok := parent.Deadline(); ok && cur.Before(deadline) {\n\t\t// The current deadline is already sooner than the new one.\n\t\treturn WithCancel(parent)\n\t}\n\tc := &timerCtx{\n\t\tcancelCtx: newCancelCtx(parent),\n\t\tdeadline:  deadline,\n\t}\n\tpropagateCancel(parent, c)\n\td := deadline.Sub(time.Now())\n\tif d <= 0 {\n\t\tc.cancel(true, DeadlineExceeded) // deadline has already passed\n\t\treturn c, func() { c.cancel(true, Canceled) }\n\t}\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tif c.err == nil {\n\t\tc.timer = time.AfterFunc(d, func() {\n\t\t\tc.cancel(true, DeadlineExceeded)\n\t\t})\n\t}\n\treturn c, func() { c.cancel(true, Canceled) }\n}\n\n// A timerCtx carries a timer and a deadline.  It embeds a cancelCtx to\n// implement Done and Err.  
It implements cancel by stopping its timer then\n// delegating to cancelCtx.cancel.\ntype timerCtx struct {\n\tcancelCtx\n\ttimer *time.Timer // Under cancelCtx.mu.\n\n\tdeadline time.Time\n}\n\nfunc (c *timerCtx) Deadline() (deadline time.Time, ok bool) {\n\treturn c.deadline, true\n}\n\nfunc (c *timerCtx) String() string {\n\treturn fmt.Sprintf(\"%v.WithDeadline(%s [%s])\", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))\n}\n\nfunc (c *timerCtx) cancel(removeFromParent bool, err error) {\n\tc.cancelCtx.cancel(false, err)\n\tif removeFromParent {\n\t\t// Remove this timerCtx from its parent cancelCtx's children.\n\t\tremoveChild(c.cancelCtx.Context, c)\n\t}\n\tc.mu.Lock()\n\tif c.timer != nil {\n\t\tc.timer.Stop()\n\t\tc.timer = nil\n\t}\n\tc.mu.Unlock()\n}\n\n// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).\n//\n// Canceling this context releases resources associated with it, so code should\n// call cancel as soon as the operations running in this Context complete:\n//\n// \tfunc slowOperationWithTimeout(ctx context.Context) (Result, error) {\n// \t\tctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)\n// \t\tdefer cancel()  // releases resources if slowOperation completes before timeout elapses\n// \t\treturn slowOperation(ctx)\n// \t}\nfunc WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {\n\treturn WithDeadline(parent, time.Now().Add(timeout))\n}\n\n// WithValue returns a copy of parent in which the value associated with key is\n// val.\n//\n// Use context Values only for request-scoped data that transits processes and\n// APIs, not for passing optional parameters to functions.\nfunc WithValue(parent Context, key interface{}, val interface{}) Context {\n\treturn &valueCtx{parent, key, val}\n}\n\n// A valueCtx carries a key-value pair.  
It implements Value for that key and\n// delegates all other calls to the embedded Context.\ntype valueCtx struct {\n\tContext\n\tkey, val interface{}\n}\n\nfunc (c *valueCtx) String() string {\n\treturn fmt.Sprintf(\"%v.WithValue(%#v, %#v)\", c.Context, c.key, c.val)\n}\n\nfunc (c *valueCtx) Value(key interface{}) interface{} {\n\tif c.key == key {\n\t\treturn c.val\n\t}\n\treturn c.Context.Value(key)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/context_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage context\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\n// otherContext is a Context that's not one of the types defined in context.go.\n// This lets us test code paths that differ based on the underlying type of the\n// Context.\ntype otherContext struct {\n\tContext\n}\n\nfunc TestBackground(t *testing.T) {\n\tc := Background()\n\tif c == nil {\n\t\tt.Fatalf(\"Background returned nil\")\n\t}\n\tselect {\n\tcase x := <-c.Done():\n\t\tt.Errorf(\"<-c.Done() == %v want nothing (it should block)\", x)\n\tdefault:\n\t}\n\tif got, want := fmt.Sprint(c), \"context.Background\"; got != want {\n\t\tt.Errorf(\"Background().String() = %q want %q\", got, want)\n\t}\n}\n\nfunc TestTODO(t *testing.T) {\n\tc := TODO()\n\tif c == nil {\n\t\tt.Fatalf(\"TODO returned nil\")\n\t}\n\tselect {\n\tcase x := <-c.Done():\n\t\tt.Errorf(\"<-c.Done() == %v want nothing (it should block)\", x)\n\tdefault:\n\t}\n\tif got, want := fmt.Sprint(c), \"context.TODO\"; got != want {\n\t\tt.Errorf(\"TODO().String() = %q want %q\", got, want)\n\t}\n}\n\nfunc TestWithCancel(t *testing.T) {\n\tc1, cancel := WithCancel(Background())\n\n\tif got, want := fmt.Sprint(c1), \"context.Background.WithCancel\"; got != want {\n\t\tt.Errorf(\"c1.String() = %q want %q\", got, want)\n\t}\n\n\to := otherContext{c1}\n\tc2, _ := WithCancel(o)\n\tcontexts := []Context{c1, o, c2}\n\n\tfor i, c := range contexts {\n\t\tif d := c.Done(); d == nil {\n\t\t\tt.Errorf(\"c[%d].Done() == %v want non-nil\", i, d)\n\t\t}\n\t\tif e := c.Err(); e != nil {\n\t\t\tt.Errorf(\"c[%d].Err() == %v want nil\", i, e)\n\t\t}\n\n\t\tselect {\n\t\tcase x := <-c.Done():\n\t\t\tt.Errorf(\"<-c.Done() == %v want nothing (it should block)\", 
x)\n\t\tdefault:\n\t\t}\n\t}\n\n\tcancel()\n\ttime.Sleep(100 * time.Millisecond) // let cancelation propagate\n\n\tfor i, c := range contexts {\n\t\tselect {\n\t\tcase <-c.Done():\n\t\tdefault:\n\t\t\tt.Errorf(\"<-c[%d].Done() blocked, but shouldn't have\", i)\n\t\t}\n\t\tif e := c.Err(); e != Canceled {\n\t\t\tt.Errorf(\"c[%d].Err() == %v want %v\", i, e, Canceled)\n\t\t}\n\t}\n}\n\nfunc TestParentFinishesChild(t *testing.T) {\n\t// Context tree:\n\t// parent -> cancelChild\n\t// parent -> valueChild -> timerChild\n\tparent, cancel := WithCancel(Background())\n\tcancelChild, stop := WithCancel(parent)\n\tdefer stop()\n\tvalueChild := WithValue(parent, \"key\", \"value\")\n\ttimerChild, stop := WithTimeout(valueChild, 10000*time.Hour)\n\tdefer stop()\n\n\tselect {\n\tcase x := <-parent.Done():\n\t\tt.Errorf(\"<-parent.Done() == %v want nothing (it should block)\", x)\n\tcase x := <-cancelChild.Done():\n\t\tt.Errorf(\"<-cancelChild.Done() == %v want nothing (it should block)\", x)\n\tcase x := <-timerChild.Done():\n\t\tt.Errorf(\"<-timerChild.Done() == %v want nothing (it should block)\", x)\n\tcase x := <-valueChild.Done():\n\t\tt.Errorf(\"<-valueChild.Done() == %v want nothing (it should block)\", x)\n\tdefault:\n\t}\n\n\t// The parent's children should contain the two cancelable children.\n\tpc := parent.(*cancelCtx)\n\tcc := cancelChild.(*cancelCtx)\n\ttc := timerChild.(*timerCtx)\n\tpc.mu.Lock()\n\tif len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] {\n\t\tt.Errorf(\"bad linkage: pc.children = %v, want %v and %v\",\n\t\t\tpc.children, cc, tc)\n\t}\n\tpc.mu.Unlock()\n\n\tif p, ok := parentCancelCtx(cc.Context); !ok || p != pc {\n\t\tt.Errorf(\"bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true\", p, ok, pc)\n\t}\n\tif p, ok := parentCancelCtx(tc.Context); !ok || p != pc {\n\t\tt.Errorf(\"bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true\", p, ok, pc)\n\t}\n\n\tcancel()\n\n\tpc.mu.Lock()\n\tif 
len(pc.children) != 0 {\n\t\tt.Errorf(\"pc.cancel didn't clear pc.children = %v\", pc.children)\n\t}\n\tpc.mu.Unlock()\n\n\t// parent and children should all be finished.\n\tcheck := func(ctx Context, name string) {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\tdefault:\n\t\t\tt.Errorf(\"<-%s.Done() blocked, but shouldn't have\", name)\n\t\t}\n\t\tif e := ctx.Err(); e != Canceled {\n\t\t\tt.Errorf(\"%s.Err() == %v want %v\", name, e, Canceled)\n\t\t}\n\t}\n\tcheck(parent, \"parent\")\n\tcheck(cancelChild, \"cancelChild\")\n\tcheck(valueChild, \"valueChild\")\n\tcheck(timerChild, \"timerChild\")\n\n\t// WithCancel should return a canceled context on a canceled parent.\n\tprecanceledChild := WithValue(parent, \"key\", \"value\")\n\tselect {\n\tcase <-precanceledChild.Done():\n\tdefault:\n\t\tt.Errorf(\"<-precanceledChild.Done() blocked, but shouldn't have\")\n\t}\n\tif e := precanceledChild.Err(); e != Canceled {\n\t\tt.Errorf(\"precanceledChild.Err() == %v want %v\", e, Canceled)\n\t}\n}\n\nfunc TestChildFinishesFirst(t *testing.T) {\n\tcancelable, stop := WithCancel(Background())\n\tdefer stop()\n\tfor _, parent := range []Context{Background(), cancelable} {\n\t\tchild, cancel := WithCancel(parent)\n\n\t\tselect {\n\t\tcase x := <-parent.Done():\n\t\t\tt.Errorf(\"<-parent.Done() == %v want nothing (it should block)\", x)\n\t\tcase x := <-child.Done():\n\t\t\tt.Errorf(\"<-child.Done() == %v want nothing (it should block)\", x)\n\t\tdefault:\n\t\t}\n\n\t\tcc := child.(*cancelCtx)\n\t\tpc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background()\n\t\tif p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) {\n\t\t\tt.Errorf(\"bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v\", p, ok, pc, pcok)\n\t\t}\n\n\t\tif pcok {\n\t\t\tpc.mu.Lock()\n\t\t\tif len(pc.children) != 1 || !pc.children[cc] {\n\t\t\t\tt.Errorf(\"bad linkage: pc.children = %v, cc = %v\", pc.children, 
cc)\n\t\t\t}\n\t\t\tpc.mu.Unlock()\n\t\t}\n\n\t\tcancel()\n\n\t\tif pcok {\n\t\t\tpc.mu.Lock()\n\t\t\tif len(pc.children) != 0 {\n\t\t\t\tt.Errorf(\"child's cancel didn't remove self from pc.children = %v\", pc.children)\n\t\t\t}\n\t\t\tpc.mu.Unlock()\n\t\t}\n\n\t\t// child should be finished.\n\t\tselect {\n\t\tcase <-child.Done():\n\t\tdefault:\n\t\t\tt.Errorf(\"<-child.Done() blocked, but shouldn't have\")\n\t\t}\n\t\tif e := child.Err(); e != Canceled {\n\t\t\tt.Errorf(\"child.Err() == %v want %v\", e, Canceled)\n\t\t}\n\n\t\t// parent should not be finished.\n\t\tselect {\n\t\tcase x := <-parent.Done():\n\t\t\tt.Errorf(\"<-parent.Done() == %v want nothing (it should block)\", x)\n\t\tdefault:\n\t\t}\n\t\tif e := parent.Err(); e != nil {\n\t\t\tt.Errorf(\"parent.Err() == %v want nil\", e)\n\t\t}\n\t}\n}\n\nfunc testDeadline(c Context, wait time.Duration, t *testing.T) {\n\tselect {\n\tcase <-time.After(wait):\n\t\tt.Fatalf(\"context should have timed out\")\n\tcase <-c.Done():\n\t}\n\tif e := c.Err(); e != DeadlineExceeded {\n\t\tt.Errorf(\"c.Err() == %v want %v\", e, DeadlineExceeded)\n\t}\n}\n\nfunc TestDeadline(t *testing.T) {\n\tc, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond))\n\tif got, prefix := fmt.Sprint(c), \"context.Background.WithDeadline(\"; !strings.HasPrefix(got, prefix) {\n\t\tt.Errorf(\"c.String() = %q want prefix %q\", got, prefix)\n\t}\n\ttestDeadline(c, 200*time.Millisecond, t)\n\n\tc, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))\n\to := otherContext{c}\n\ttestDeadline(o, 200*time.Millisecond, t)\n\n\tc, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))\n\to = otherContext{c}\n\tc, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond))\n\ttestDeadline(c, 200*time.Millisecond, t)\n}\n\nfunc TestTimeout(t *testing.T) {\n\tc, _ := WithTimeout(Background(), 100*time.Millisecond)\n\tif got, prefix := fmt.Sprint(c), \"context.Background.WithDeadline(\"; !strings.HasPrefix(got, 
prefix) {\n\t\tt.Errorf(\"c.String() = %q want prefix %q\", got, prefix)\n\t}\n\ttestDeadline(c, 200*time.Millisecond, t)\n\n\tc, _ = WithTimeout(Background(), 100*time.Millisecond)\n\to := otherContext{c}\n\ttestDeadline(o, 200*time.Millisecond, t)\n\n\tc, _ = WithTimeout(Background(), 100*time.Millisecond)\n\to = otherContext{c}\n\tc, _ = WithTimeout(o, 300*time.Millisecond)\n\ttestDeadline(c, 200*time.Millisecond, t)\n}\n\nfunc TestCanceledTimeout(t *testing.T) {\n\tc, _ := WithTimeout(Background(), 200*time.Millisecond)\n\to := otherContext{c}\n\tc, cancel := WithTimeout(o, 400*time.Millisecond)\n\tcancel()\n\ttime.Sleep(100 * time.Millisecond) // let cancelation propagate\n\tselect {\n\tcase <-c.Done():\n\tdefault:\n\t\tt.Errorf(\"<-c.Done() blocked, but shouldn't have\")\n\t}\n\tif e := c.Err(); e != Canceled {\n\t\tt.Errorf(\"c.Err() == %v want %v\", e, Canceled)\n\t}\n}\n\ntype key1 int\ntype key2 int\n\nvar k1 = key1(1)\nvar k2 = key2(1) // same int as k1, different type\nvar k3 = key2(3) // same type as k2, different int\n\nfunc TestValues(t *testing.T) {\n\tcheck := func(c Context, nm, v1, v2, v3 string) {\n\t\tif v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 {\n\t\t\tt.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0)\n\t\t}\n\t\tif v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 {\n\t\t\tt.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0)\n\t\t}\n\t\tif v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 {\n\t\t\tt.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0)\n\t\t}\n\t}\n\n\tc0 := Background()\n\tcheck(c0, \"c0\", \"\", \"\", \"\")\n\n\tc1 := WithValue(Background(), k1, \"c1k1\")\n\tcheck(c1, \"c1\", \"c1k1\", \"\", \"\")\n\n\tif got, want := fmt.Sprint(c1), `context.Background.WithValue(1, \"c1k1\")`; got != want {\n\t\tt.Errorf(\"c.String() = %q want %q\", got, want)\n\t}\n\n\tc2 := WithValue(c1, k2, 
\"c2k2\")\n\tcheck(c2, \"c2\", \"c1k1\", \"c2k2\", \"\")\n\n\tc3 := WithValue(c2, k3, \"c3k3\")\n\tcheck(c3, \"c2\", \"c1k1\", \"c2k2\", \"c3k3\")\n\n\tc4 := WithValue(c3, k1, nil)\n\tcheck(c4, \"c4\", \"\", \"c2k2\", \"c3k3\")\n\n\to0 := otherContext{Background()}\n\tcheck(o0, \"o0\", \"\", \"\", \"\")\n\n\to1 := otherContext{WithValue(Background(), k1, \"c1k1\")}\n\tcheck(o1, \"o1\", \"c1k1\", \"\", \"\")\n\n\to2 := WithValue(o1, k2, \"o2k2\")\n\tcheck(o2, \"o2\", \"c1k1\", \"o2k2\", \"\")\n\n\to3 := otherContext{c4}\n\tcheck(o3, \"o3\", \"\", \"c2k2\", \"c3k3\")\n\n\to4 := WithValue(o3, k3, nil)\n\tcheck(o4, \"o4\", \"\", \"c2k2\", \"\")\n}\n\nfunc TestAllocs(t *testing.T) {\n\tbg := Background()\n\tfor _, test := range []struct {\n\t\tdesc       string\n\t\tf          func()\n\t\tlimit      float64\n\t\tgccgoLimit float64\n\t}{\n\t\t{\n\t\t\tdesc:       \"Background()\",\n\t\t\tf:          func() { Background() },\n\t\t\tlimit:      0,\n\t\t\tgccgoLimit: 0,\n\t\t},\n\t\t{\n\t\t\tdesc: fmt.Sprintf(\"WithValue(bg, %v, nil)\", k1),\n\t\t\tf: func() {\n\t\t\t\tc := WithValue(bg, k1, nil)\n\t\t\t\tc.Value(k1)\n\t\t\t},\n\t\t\tlimit:      3,\n\t\t\tgccgoLimit: 3,\n\t\t},\n\t\t{\n\t\t\tdesc: \"WithTimeout(bg, 15*time.Millisecond)\",\n\t\t\tf: func() {\n\t\t\t\tc, _ := WithTimeout(bg, 15*time.Millisecond)\n\t\t\t\t<-c.Done()\n\t\t\t},\n\t\t\tlimit:      8,\n\t\t\tgccgoLimit: 15,\n\t\t},\n\t\t{\n\t\t\tdesc: \"WithCancel(bg)\",\n\t\t\tf: func() {\n\t\t\t\tc, cancel := WithCancel(bg)\n\t\t\t\tcancel()\n\t\t\t\t<-c.Done()\n\t\t\t},\n\t\t\tlimit:      5,\n\t\t\tgccgoLimit: 8,\n\t\t},\n\t\t{\n\t\t\tdesc: \"WithTimeout(bg, 100*time.Millisecond)\",\n\t\t\tf: func() {\n\t\t\t\tc, cancel := WithTimeout(bg, 100*time.Millisecond)\n\t\t\t\tcancel()\n\t\t\t\t<-c.Done()\n\t\t\t},\n\t\t\tlimit:      8,\n\t\t\tgccgoLimit: 25,\n\t\t},\n\t} {\n\t\tlimit := test.limit\n\t\tif runtime.Compiler == \"gccgo\" {\n\t\t\t// gccgo does not yet do escape analysis.\n\t\t\t// TOOD(iant): Remove this 
when gccgo does do escape analysis.\n\t\t\tlimit = test.gccgoLimit\n\t\t}\n\t\tif n := testing.AllocsPerRun(100, test.f); n > limit {\n\t\t\tt.Errorf(\"%s allocs = %f want %d\", test.desc, n, int(limit))\n\t\t}\n\t}\n}\n\nfunc TestSimultaneousCancels(t *testing.T) {\n\troot, cancel := WithCancel(Background())\n\tm := map[Context]CancelFunc{root: cancel}\n\tq := []Context{root}\n\t// Create a tree of contexts.\n\tfor len(q) != 0 && len(m) < 100 {\n\t\tparent := q[0]\n\t\tq = q[1:]\n\t\tfor i := 0; i < 4; i++ {\n\t\t\tctx, cancel := WithCancel(parent)\n\t\t\tm[ctx] = cancel\n\t\t\tq = append(q, ctx)\n\t\t}\n\t}\n\t// Start all the cancels in a random order.\n\tvar wg sync.WaitGroup\n\twg.Add(len(m))\n\tfor _, cancel := range m {\n\t\tgo func(cancel CancelFunc) {\n\t\t\tcancel()\n\t\t\twg.Done()\n\t\t}(cancel)\n\t}\n\t// Wait on all the contexts in a random order.\n\tfor ctx := range m {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tbuf := make([]byte, 10<<10)\n\t\t\tn := runtime.Stack(buf, true)\n\t\t\tt.Fatalf(\"timed out waiting for <-ctx.Done(); stacks:\\n%s\", buf[:n])\n\t\t}\n\t}\n\t// Wait for all the cancel functions to return.\n\tdone := make(chan struct{})\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(done)\n\t}()\n\tselect {\n\tcase <-done:\n\tcase <-time.After(1 * time.Second):\n\t\tbuf := make([]byte, 10<<10)\n\t\tn := runtime.Stack(buf, true)\n\t\tt.Fatalf(\"timed out waiting for cancel functions; stacks:\\n%s\", buf[:n])\n\t}\n}\n\nfunc TestInterlockedCancels(t *testing.T) {\n\tparent, cancelParent := WithCancel(Background())\n\tchild, cancelChild := WithCancel(parent)\n\tgo func() {\n\t\tparent.Done()\n\t\tcancelChild()\n\t}()\n\tcancelParent()\n\tselect {\n\tcase <-child.Done():\n\tcase <-time.After(1 * time.Second):\n\t\tbuf := make([]byte, 10<<10)\n\t\tn := runtime.Stack(buf, true)\n\t\tt.Fatalf(\"timed out waiting for child.Done(); stacks:\\n%s\", buf[:n])\n\t}\n}\n\nfunc TestLayersCancel(t *testing.T) 
{\n\ttestLayers(t, time.Now().UnixNano(), false)\n}\n\nfunc TestLayersTimeout(t *testing.T) {\n\ttestLayers(t, time.Now().UnixNano(), true)\n}\n\nfunc testLayers(t *testing.T, seed int64, testTimeout bool) {\n\trand.Seed(seed)\n\terrorf := func(format string, a ...interface{}) {\n\t\tt.Errorf(fmt.Sprintf(\"seed=%d: %s\", seed, format), a...)\n\t}\n\tconst (\n\t\ttimeout   = 200 * time.Millisecond\n\t\tminLayers = 30\n\t)\n\ttype value int\n\tvar (\n\t\tvals      []*value\n\t\tcancels   []CancelFunc\n\t\tnumTimers int\n\t\tctx       = Background()\n\t)\n\tfor i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ {\n\t\tswitch rand.Intn(3) {\n\t\tcase 0:\n\t\t\tv := new(value)\n\t\t\tctx = WithValue(ctx, v, v)\n\t\t\tvals = append(vals, v)\n\t\tcase 1:\n\t\t\tvar cancel CancelFunc\n\t\t\tctx, cancel = WithCancel(ctx)\n\t\t\tcancels = append(cancels, cancel)\n\t\tcase 2:\n\t\t\tvar cancel CancelFunc\n\t\t\tctx, cancel = WithTimeout(ctx, timeout)\n\t\t\tcancels = append(cancels, cancel)\n\t\t\tnumTimers++\n\t\t}\n\t}\n\tcheckValues := func(when string) {\n\t\tfor _, key := range vals {\n\t\t\tif val := ctx.Value(key).(*value); key != val {\n\t\t\t\terrorf(\"%s: ctx.Value(%p) = %p want %p\", when, key, val, key)\n\t\t\t}\n\t\t}\n\t}\n\tselect {\n\tcase <-ctx.Done():\n\t\terrorf(\"ctx should not be canceled yet\")\n\tdefault:\n\t}\n\tif s, prefix := fmt.Sprint(ctx), \"context.Background.\"; !strings.HasPrefix(s, prefix) {\n\t\tt.Errorf(\"ctx.String() = %q want prefix %q\", s, prefix)\n\t}\n\tt.Log(ctx)\n\tcheckValues(\"before cancel\")\n\tif testTimeout {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\tcase <-time.After(timeout + timeout/10):\n\t\t\terrorf(\"ctx should have timed out\")\n\t\t}\n\t\tcheckValues(\"after timeout\")\n\t} else {\n\t\tcancel := cancels[rand.Intn(len(cancels))]\n\t\tcancel()\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\tdefault:\n\t\t\terrorf(\"ctx should be canceled\")\n\t\t}\n\t\tcheckValues(\"after 
cancel\")\n\t}\n}\n\nfunc TestCancelRemoves(t *testing.T) {\n\tcheckChildren := func(when string, ctx Context, want int) {\n\t\tif got := len(ctx.(*cancelCtx).children); got != want {\n\t\t\tt.Errorf(\"%s: context has %d children, want %d\", when, got, want)\n\t\t}\n\t}\n\n\tctx, _ := WithCancel(Background())\n\tcheckChildren(\"after creation\", ctx, 0)\n\t_, cancel := WithCancel(ctx)\n\tcheckChildren(\"with WithCancel child \", ctx, 1)\n\tcancel()\n\tcheckChildren(\"after cancelling WithCancel child\", ctx, 0)\n\n\tctx, _ = WithCancel(Background())\n\tcheckChildren(\"after creation\", ctx, 0)\n\t_, cancel = WithTimeout(ctx, 60*time.Minute)\n\tcheckChildren(\"with WithTimeout child \", ctx, 1)\n\tcancel()\n\tcheckChildren(\"after cancelling WithTimeout child\", ctx, 0)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/ctxhttp/cancelreq.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.5\n\npackage ctxhttp\n\nimport \"net/http\"\n\nfunc canceler(client *http.Client, req *http.Request) func() {\n\tch := make(chan struct{})\n\treq.Cancel = ch\n\n\treturn func() {\n\t\tclose(ch)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.5\n\npackage ctxhttp\n\nimport \"net/http\"\n\ntype requestCanceler interface {\n\tCancelRequest(*http.Request)\n}\n\nfunc canceler(client *http.Client, req *http.Request) func() {\n\trc, ok := client.Transport.(requestCanceler)\n\tif !ok {\n\t\treturn func() {}\n\t}\n\treturn func() {\n\t\trc.CancelRequest(req)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package ctxhttp provides helper functions for performing context-aware HTTP requests.\npackage ctxhttp\n\nimport (\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\n\t\"golang.org/x/net/context\"\n)\n\n// Do sends an HTTP request with the provided http.Client and returns an HTTP response.\n// If the client is nil, http.DefaultClient is used.\n// If the context is canceled or times out, ctx.Err() will be returned.\nfunc Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {\n\tif client == nil {\n\t\tclient = http.DefaultClient\n\t}\n\n\t// Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.\n\tcancel := canceler(client, req)\n\n\ttype responseAndError struct {\n\t\tresp *http.Response\n\t\terr  error\n\t}\n\tresult := make(chan responseAndError, 1)\n\n\tgo func() {\n\t\tresp, err := client.Do(req)\n\t\tresult <- responseAndError{resp, err}\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tcancel()\n\t\treturn nil, ctx.Err()\n\tcase r := <-result:\n\t\treturn r.resp, r.err\n\t}\n}\n\n// Get issues a GET request via the Do function.\nfunc Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Do(ctx, client, req)\n}\n\n// Head issues a HEAD request via the Do function.\nfunc Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {\n\treq, err := http.NewRequest(\"HEAD\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Do(ctx, client, req)\n}\n\n// Post issues a POST request via the Do function.\nfunc Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {\n\treq, err := 
http.NewRequest(\"POST\", url, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", bodyType)\n\treturn Do(ctx, client, req)\n}\n\n// PostForm issues a POST request via the Do function.\nfunc PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {\n\treturn Post(ctx, client, url, \"application/x-www-form-urlencoded\", strings.NewReader(data.Encode()))\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ctxhttp\n\nimport (\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n)\n\nconst (\n\trequestDuration = 100 * time.Millisecond\n\trequestBody     = \"ok\"\n)\n\nfunc TestNoTimeout(t *testing.T) {\n\tctx := context.Background()\n\tresp, err := doRequest(ctx)\n\n\tif resp == nil || err != nil {\n\t\tt.Fatalf(\"error received from client: %v %v\", err, resp)\n\t}\n}\nfunc TestCancel(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tgo func() {\n\t\ttime.Sleep(requestDuration / 2)\n\t\tcancel()\n\t}()\n\n\tresp, err := doRequest(ctx)\n\n\tif resp != nil || err == nil {\n\t\tt.Fatalf(\"expected error, didn't get one. resp: %v\", resp)\n\t}\n\tif err != ctx.Err() {\n\t\tt.Fatalf(\"expected error from context but got: %v\", err)\n\t}\n}\n\nfunc TestCancelAfterRequest(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tresp, err := doRequest(ctx)\n\n\t// Cancel before reading the body.\n\t// Request.Body should still be readable after the context is canceled.\n\tcancel()\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil || string(b) != requestBody {\n\t\tt.Fatalf(\"could not read body: %q %v\", b, err)\n\t}\n}\n\nfunc doRequest(ctx context.Context) (*http.Response, error) {\n\tvar okHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttime.Sleep(requestDuration)\n\t\tw.Write([]byte(requestBody))\n\t})\n\n\tserv := httptest.NewServer(okHandler)\n\tdefer serv.Close()\n\n\treturn Get(ctx, nil, serv.URL)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/withtimeout_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage context_test\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n)\n\nfunc ExampleWithTimeout() {\n\t// Pass a context with a timeout to tell a blocking function that it\n\t// should abandon its work after the timeout elapses.\n\tctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\tselect {\n\tcase <-time.After(200 * time.Millisecond):\n\t\tfmt.Println(\"overslept\")\n\tcase <-ctx.Done():\n\t\tfmt.Println(ctx.Err()) // prints \"context deadline exceeded\"\n\t}\n\t// Output:\n\t// context deadline exceeded\n}\n"
  }
]